From 46e5ac9672f2d898d44f7d89349a5faf54a300b8 Mon Sep 17 00:00:00 2001
From: cyhhao
Date: Thu, 15 Jan 2026 18:54:42 +0800
Subject: [PATCH 001/214] fix(gateway): align Claude OAuth request adaptation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 backend/internal/pkg/claude/constants.go     |  44 +-
 backend/internal/service/gateway_service.go  | 454 ++++++++++++++++++-
 backend/internal/service/identity_service.go |   8 +-
 3 files changed, 481 insertions(+), 25 deletions(-)

diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go
index d1a56a84..15144881 100644
--- a/backend/internal/pkg/claude/constants.go
+++ b/backend/internal/pkg/claude/constants.go
@@ -25,15 +25,15 @@ const APIKeyHaikuBetaHeader = BetaInterleavedThinking
 
 // DefaultHeaders are the default request headers of the Claude Code client.
 var DefaultHeaders = map[string]string{
-	"User-Agent":                  "claude-cli/2.0.62 (external, cli)",
+	"User-Agent":                  "claude-cli/2.1.2 (external, cli)",
 	"X-Stainless-Lang":            "js",
-	"X-Stainless-Package-Version": "0.52.0",
+	"X-Stainless-Package-Version": "0.70.0",
 	"X-Stainless-OS":              "Linux",
 	"X-Stainless-Arch":            "x64",
 	"X-Stainless-Runtime":         "node",
-	"X-Stainless-Runtime-Version": "v22.14.0",
+	"X-Stainless-Runtime-Version": "v24.3.0",
 	"X-Stainless-Retry-Count":     "0",
-	"X-Stainless-Timeout":         "60",
+	"X-Stainless-Timeout":         "600",
 	"X-App":                       "cli",
 	"Anthropic-Dangerous-Direct-Browser-Access": "true",
 }
@@ -79,3 +79,39 @@ func DefaultModelIDs() []string {
 
 // DefaultTestModel is the default model used in tests
 const DefaultTestModel = "claude-sonnet-4-5-20250929"
+
+// ModelIDOverrides maps short model names to the full IDs required by Claude OAuth requests
+var ModelIDOverrides = map[string]string{
+	"claude-sonnet-4-5": "claude-sonnet-4-5-20250929",
+	"claude-opus-4-5":   "claude-opus-4-5-20251101",
+	"claude-haiku-4-5":  "claude-haiku-4-5-20251001",
+}
+
+// ModelIDReverseOverrides restores upstream model IDs to their short names
+var ModelIDReverseOverrides = map[string]string{
+	"claude-sonnet-4-5-20250929": "claude-sonnet-4-5",
+	"claude-opus-4-5-20251101":   "claude-opus-4-5",
+	"claude-haiku-4-5-20251001":  "claude-haiku-4-5",
+}
+
+// NormalizeModelID maps a model ID according to the Claude OAuth rules
+func NormalizeModelID(id string) string {
+	if id == "" {
+		return id
+	}
+	if mapped, ok := ModelIDOverrides[id]; ok {
+		return mapped
+	}
+	return id
+}
+
+// DenormalizeModelID converts an upstream model ID back to its short name
+func DenormalizeModelID(id string) string {
+	if id == "" {
+		return id
+	}
+	if mapped, ok := ModelIDReverseOverrides[id]; ok {
+		return mapped
+	}
+	return id
+}
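[Aside, not part of the patch] The two override tables are meant to be exact inverses, so normalizing a short name and then denormalizing it should return the original, while unknown or already-dated IDs pass through untouched. A minimal standalone sketch of that round-trip (the maps are copied from the patch above):

package main

import "fmt"

// Inlined copies of ModelIDOverrides / ModelIDReverseOverrides from the patch.
var overrides = map[string]string{
	"claude-sonnet-4-5": "claude-sonnet-4-5-20250929",
	"claude-opus-4-5":   "claude-opus-4-5-20251101",
	"claude-haiku-4-5":  "claude-haiku-4-5-20251001",
}

var reverse = map[string]string{
	"claude-sonnet-4-5-20250929": "claude-sonnet-4-5",
	"claude-opus-4-5-20251101":   "claude-opus-4-5",
	"claude-haiku-4-5-20251001":  "claude-haiku-4-5",
}

func normalize(id string) string {
	if mapped, ok := overrides[id]; ok {
		return mapped
	}
	return id // unmapped IDs pass through unchanged
}

func denormalize(id string) string {
	if mapped, ok := reverse[id]; ok {
		return mapped
	}
	return id
}

func main() {
	for short := range overrides {
		if got := denormalize(normalize(short)); got != short {
			fmt.Printf("round-trip broken for %s: got %s\n", short, got)
		}
	}
	fmt.Println(normalize("claude-3-5-sonnet-20241022")) // already-dated ID, untouched
}

diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go
index d5eb0e52..1d29b3fd 100644
--- a/backend/internal/service/gateway_service.go
+++ b/backend/internal/service/gateway_service.go
@@ -17,12 +17,14 @@ import (
 	"strings"
 	"sync/atomic"
 	"time"
+	"unicode"
 
 	"github.com/Wei-Shaw/sub2api/internal/config"
 	"github.com/Wei-Shaw/sub2api/internal/pkg/claude"
 	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
 	"github.com/Wei-Shaw/sub2api/internal/util/responseheaders"
 	"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
+	"github.com/google/uuid"
 
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
@@ -44,6 +46,36 @@ var (
 	sseDataRe            = regexp.MustCompile(`^data:\s*`)
 	sessionIDRegex       = regexp.MustCompile(`session_([a-f0-9-]{36})`)
 	claudeCliUserAgentRe = regexp.MustCompile(`^claude-cli/\d+\.\d+\.\d+`)
+	toolPrefixRe         = regexp.MustCompile(`(?i)^(?:oc_|mcp_)`)
+	toolNameBoundaryRe   = 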
regexp.MustCompile(`[^a-zA-Z0-9]+`)
+	toolNameCamelRe      = regexp.MustCompile(`([a-z0-9])([A-Z])`)
+
+	claudeToolNameOverrides = map[string]string{
+		"bash":      "Bash",
+		"read":      "Read",
+		"edit":      "Edit",
+		"write":     "Write",
+		"task":      "Task",
+		"glob":      "Glob",
+		"grep":      "Grep",
+		"webfetch":  "WebFetch",
+		"websearch": "WebSearch",
+		"todowrite": "TodoWrite",
+		"question":  "AskUserQuestion",
+	}
+	openCodeToolOverrides = map[string]string{
+		"Bash":            "bash",
+		"Read":            "read",
+		"Edit":            "edit",
+		"Write":           "write",
+		"Task":            "task",
+		"Glob":            "glob",
+		"Grep":            "grep",
+		"WebFetch":        "webfetch",
+		"WebSearch":       "websearch",
+		"TodoWrite":       "todowrite",
+		"AskUserQuestion": "question",
+	}
 
 	// claudeCodePromptPrefixes lists the prefixes used to detect the Claude Code system prompt
 	// Covers several variants: standard, Agent SDK, Explore Agent, Compact, etc.
@@ -346,6 +378,268 @@ func (s *GatewayService) replaceModelInBody(body []byte, newModel string) []byte
 	return newBody
 }
 
+type claudeOAuthNormalizeOptions struct {
+	injectMetadata          bool
+	metadataUserID          string
+	stripSystemCacheControl bool
+}
+
+func stripToolPrefix(value string) string {
+	if value == "" {
+		return value
+	}
+	return toolPrefixRe.ReplaceAllString(value, "")
+}
+
+func toPascalCase(value string) string {
+	if value == "" {
+		return value
+	}
+	normalized := toolNameBoundaryRe.ReplaceAllString(value, " ")
+	tokens := make([]string, 0)
+	for _, token := range strings.Fields(normalized) {
+		expanded := toolNameCamelRe.ReplaceAllString(token, "$1 $2")
+		parts := strings.Fields(expanded)
+		if len(parts) > 0 {
+			tokens = append(tokens, parts...)
+		}
+	}
+	if len(tokens) == 0 {
+		return value
+	}
+	var builder strings.Builder
+	for _, token := range tokens {
+		lower := strings.ToLower(token)
+		if lower == "" {
+			continue
+		}
+		runes := []rune(lower)
+		runes[0] = unicode.ToUpper(runes[0])
+		builder.WriteString(string(runes))
+	}
+	return builder.String()
+}
+
+func toSnakeCase(value string) string {
+	if value == "" {
+		return value
+	}
+	output := toolNameCamelRe.ReplaceAllString(value, "$1_$2")
+	output = toolNameBoundaryRe.ReplaceAllString(output, "_")
+	output = strings.Trim(output, "_")
+	return strings.ToLower(output)
+}
+
+func normalizeToolNameForClaude(name string, cache map[string]string) string {
+	if name == "" {
+		return name
+	}
+	stripped := stripToolPrefix(name)
+	mapped, ok := claudeToolNameOverrides[strings.ToLower(stripped)]
+	if !ok {
+		mapped = toPascalCase(stripped)
+	}
+	if mapped != "" && cache != nil && mapped != stripped {
+		cache[mapped] = stripped
+	}
+	if mapped == "" {
+		return stripped
+	}
+	return mapped
+}
+
+func normalizeToolNameForOpenCode(name string, cache map[string]string) string {
+	if name == "" {
+		return name
+	}
+	if cache != nil {
+		if mapped, ok := cache[name]; ok {
+			return mapped
+		}
+	}
+	if mapped, ok := openCodeToolOverrides[name]; ok {
+		return mapped
+	}
+	return toSnakeCase(name)
+}
+
+func stripCacheControlFromSystemBlocks(system any) bool {
+	blocks, ok := system.([]any)
+	if !ok {
+		return false
+	}
+	changed := false
+	for _, item := range blocks {
+		block, ok := item.(map[string]any)
+		if !ok {
+			continue
+		}
+		if _, exists := block["cache_control"]; !exists {
+			continue
+		}
+		if text, ok := block["text"].(string); ok && text == claudeCodeSystemPrompt {
+			continue
+		}
+		delete(block, "cache_control")
+		changed = true
+	}
+	return changed
+}
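[Aside, not part of the patch] The case-conversion helpers above are what let the gateway present OpenCode-style tool names (e.g. `oc_todowrite`) to Claude as PascalCase and translate them back. A small self-contained sketch of the expected behavior, reimplementing the two conversions with the same regexes (assumed semantics, not the patch's code verbatim):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	boundaryRe = regexp.MustCompile(`[^a-zA-Z0-9]+`)     // non-alphanumeric runs become word boundaries
	camelRe    = regexp.MustCompile(`([a-z0-9])([A-Z])`) // split lower-to-upper transitions
	prefixRe   = regexp.MustCompile(`(?i)^(?:oc_|mcp_)`) // client-specific tool prefixes
)

func toPascal(s string) string {
	s = boundaryRe.ReplaceAllString(s, " ")
	var b strings.Builder
	for _, tok := range strings.Fields(camelRe.ReplaceAllString(s, "$1 $2")) {
		tok = strings.ToLower(tok)
		b.WriteString(strings.ToUpper(tok[:1]) + tok[1:])
	}
	return b.String()
}

func toSnake(s string) string {
	s = camelRe.ReplaceAllString(s, "$1_$2")
	s = boundaryRe.ReplaceAllString(s, "_")
	return strings.ToLower(strings.Trim(s, "_"))
}

func main() {
	name := "oc_list_files"
	pascal := toPascal(prefixRe.ReplaceAllString(name, ""))
	fmt.Println(pascal)          // ListFiles
	fmt.Println(toSnake(pascal)) // list_files — matches the stripped original
}

+
+func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAuthNormalizeOptions) ([]byte, string, map[string]string) {
+	if len(body) == 0 {
+		return body, modelID, nil
+	}
+	var req 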
map[string]any
+	if err := json.Unmarshal(body, &req); err != nil {
+		return body, modelID, nil
+	}
+
+	toolNameMap := make(map[string]string)
+
+	if rawModel, ok := req["model"].(string); ok {
+		normalized := claude.NormalizeModelID(rawModel)
+		if normalized != rawModel {
+			req["model"] = normalized
+			modelID = normalized
+		}
+	}
+
+	if rawTools, exists := req["tools"]; exists {
+		switch tools := rawTools.(type) {
+		case []any:
+			for idx, tool := range tools {
+				toolMap, ok := tool.(map[string]any)
+				if !ok {
+					continue
+				}
+				if name, ok := toolMap["name"].(string); ok {
+					normalized := normalizeToolNameForClaude(name, toolNameMap)
+					if normalized != "" && normalized != name {
+						toolMap["name"] = normalized
+					}
+				}
+				tools[idx] = toolMap
+			}
+			req["tools"] = tools
+		case map[string]any:
+			normalizedTools := make(map[string]any, len(tools))
+			for name, value := range tools {
+				normalized := normalizeToolNameForClaude(name, toolNameMap)
+				if normalized == "" {
+					normalized = name
+				}
+				if toolMap, ok := value.(map[string]any); ok {
+					if toolName, ok := toolMap["name"].(string); ok {
+						mappedName := normalizeToolNameForClaude(toolName, toolNameMap)
+						if mappedName != "" && mappedName != toolName {
+							toolMap["name"] = mappedName
+						}
+					} else if normalized != name {
+						toolMap["name"] = normalized
+					}
+					normalizedTools[normalized] = toolMap
+					continue
+				}
+				normalizedTools[normalized] = value
+			}
+			req["tools"] = normalizedTools
+		}
+	} else {
+		req["tools"] = []any{}
+	}
+
+	if messages, ok := req["messages"].([]any); ok {
+		for _, msg := range messages {
+			msgMap, ok := msg.(map[string]any)
+			if !ok {
+				continue
+			}
+			content, ok := msgMap["content"].([]any)
+			if !ok {
+				continue
+			}
+			for _, block := range content {
+				blockMap, ok := block.(map[string]any)
+				if !ok {
+					continue
+				}
+				if blockType, _ := blockMap["type"].(string); blockType != "tool_use" {
+					continue
+				}
+				if name, ok := blockMap["name"].(string); ok {
+					normalized := normalizeToolNameForClaude(name, toolNameMap)
+					if normalized != "" && normalized != name {
+						blockMap["name"] = normalized
+					}
+				}
+			}
+		}
+	}
+
+	if opts.stripSystemCacheControl {
+		if system, ok := req["system"]; ok {
+			_ = stripCacheControlFromSystemBlocks(system)
+		}
+	}
+
+	if opts.injectMetadata && opts.metadataUserID != "" {
+		metadata, ok := req["metadata"].(map[string]any)
+		if !ok {
+			metadata = map[string]any{}
+			req["metadata"] = metadata
+		}
+		if existing, ok := metadata["user_id"].(string); !ok || existing == "" {
+			metadata["user_id"] = opts.metadataUserID
+		}
+	}
+
+	if _, ok := req["temperature"]; ok {
+		delete(req, "temperature")
+	}
+	if _, ok := req["tool_choice"]; ok {
+		delete(req, "tool_choice")
+	}
+
+	newBody, err := json.Marshal(req)
+	if err != nil {
+		return body, modelID, toolNameMap
+	}
+	return newBody, modelID, toolNameMap
+}
+
+func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account *Account, fp *Fingerprint) string {
+	if parsed == nil || fp == nil || fp.ClientID == "" {
+		return ""
+	}
+	if parsed.MetadataUserID != "" {
+		return ""
+	}
+	accountUUID := account.GetExtraString("account_uuid")
+	if accountUUID == "" {
+		return ""
+	}
+	sessionHash := s.GenerateSessionHash(parsed)
+	sessionID := uuid.NewString()
+	if sessionHash != "" {
+		seed := fmt.Sprintf("%d::%s", account.ID, sessionHash)
+		sessionID = generateSessionUUID(seed)
+	}
+	return fmt.Sprintf("user_%s_account_%s_session_%s", fp.ClientID, accountUUID, sessionID)
+}
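[Aside, not part of the patch] The fabricated metadata user_id has the shape `user_<fingerprint-client-id>_account_<account-uuid>_session_<session-uuid>`, and the session UUID is derived deterministically from the account ID plus sticky-session hash, so retries of the same session report the same ID. A sketch with made-up values (`generateSessionUUID` itself is defined next in the patch; the hashing below mirrors its approach):

package main

import (
	"crypto/sha256"
	"fmt"
)

// sessionUUIDFromSeed mirrors the patch's approach: hash the seed and stamp
// UUIDv4 version/variant bits so the result looks like a normal random UUID.
func sessionUUIDFromSeed(seed string) string {
	hash := sha256.Sum256([]byte(seed))
	b := hash[:16]
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
}

func main() {
	// Hypothetical inputs: account 42 with some sticky-session hash.
	seed := fmt.Sprintf("%d::%s", 42, "3f2c9a")
	fmt.Println(sessionUUIDFromSeed(seed) == sessionUUIDFromSeed(seed)) // true: deterministic
	clientID, accountUUID := "c0ffee", "1111-2222"                     // made-up values
	fmt.Printf("user_%s_account_%s_session_%s\n", clientID, accountUUID, sessionUUIDFromSeed(seed))
}

+
+func generateSessionUUID(seed string) string {
+	if seed == "" {
+		return 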
uuid.NewString()
+	}
+	hash := sha256.Sum256([]byte(seed))
+	bytes := hash[:16]
+	bytes[6] = (bytes[6] & 0x0f) | 0x40
+	bytes[8] = (bytes[8] & 0x3f) | 0x80
+	return fmt.Sprintf("%x-%x-%x-%x-%x",
+		bytes[0:4], bytes[4:6], bytes[6:8], bytes[8:10], bytes[10:16])
+}
+
 // SelectAccount selects an account (sticky session + priority)
 func (s *GatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) {
 	return s.SelectAccountForModel(ctx, groupID, sessionHash, "")
@@ -1423,21 +1717,36 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
 	body := parsed.Body
 	reqModel := parsed.Model
 	reqStream := parsed.Stream
+	originalModel := reqModel
+	var toolNameMap map[string]string
 
-	// Smart-inject the Claude Code system prompt (only OAuth/SetupToken accounts need it)
-	// Conditions: 1) OAuth/SetupToken account 2) not a Claude Code client 3) not a Haiku model 4) system does not already carry the Claude Code prompt
-	if account.IsOAuth() &&
-		!isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) &&
-		!strings.Contains(strings.ToLower(reqModel), "haiku") &&
-		!systemIncludesClaudeCodePrompt(parsed.System) {
-		body = injectClaudeCodePrompt(body, parsed.System)
+	if account.IsOAuth() {
+		// Smart-inject the Claude Code system prompt (only OAuth/SetupToken accounts need it)
+		// Conditions: 1) OAuth/SetupToken account 2) not a Claude Code client 3) not a Haiku model 4) system does not already carry the Claude Code prompt
+		if !isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) &&
+			!strings.Contains(strings.ToLower(reqModel), "haiku") &&
+			!systemIncludesClaudeCodePrompt(parsed.System) {
+			body = injectClaudeCodePrompt(body, parsed.System)
+		}
+
+		normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true}
+		if s.identityService != nil {
+			fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header)
+			if err == nil && fp != nil {
+				if metadataUserID := s.buildOAuthMetadataUserID(parsed, account, fp); metadataUserID != "" {
+					normalizeOpts.injectMetadata = true
+					normalizeOpts.metadataUserID = metadataUserID
+				}
+			}
+		}
+
+		body, reqModel, toolNameMap = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts)
 	}
 
 	// Enforce the cache_control block count limit (at most 4)
 	body = enforceCacheControlLimit(body)
 
 	// Apply model mapping (apikey-type accounts only)
-	originalModel := reqModel
 	if account.Type == AccountTypeAPIKey {
 		mappedModel := account.GetMappedModel(reqModel)
 		if mappedModel != reqModel {
@@ -1465,7 +1774,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
 	retryStart := time.Now()
 	for attempt := 1; attempt <= maxRetryAttempts; attempt++ {
 		// Build the upstream request (rebuilt on every retry because the body must be re-read)
-		upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel)
+		upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream)
 		if err != nil {
 			return nil, err
 		}
@@ -1541,7 +1850,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
 		// also downgrade tool_use/tool_result blocks to text.
filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) + retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -1572,7 +1881,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed { log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID) filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body) - retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel) + retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream) if buildErr2 == nil { retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency) if retryErr2 == nil { @@ -1785,7 +2094,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A var firstTokenMs *int var clientDisconnect bool if reqStream { - streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel) + streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap) if err != nil { if err.Error() == "have error in stream" { return nil, &UpstreamFailoverError{ @@ -1798,7 +2107,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A firstTokenMs = streamResult.firstTokenMs clientDisconnect = streamResult.clientDisconnect } else { - usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel) + usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap) if err != nil { return nil, err } @@ -1815,7 +2124,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A }, nil } -func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) { +func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool) (*http.Request, error) { // 确定目标URL targetURL := claudeAPIURL if account.Type == AccountTypeAPIKey { @@ -1884,6 +2193,9 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } + if tokenType == "oauth" { + applyClaudeOAuthHeaderDefaults(req, reqStream) + } // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" { @@ -1966,6 +2278,26 @@ func defaultAPIKeyBetaHeader(body []byte) string { return claude.APIKeyBetaHeader } +func applyClaudeOAuthHeaderDefaults(req *http.Request, isStream bool) { + if req == nil { + return + } + if req.Header.Get("accept") == "" { + req.Header.Set("accept", "application/json") + } + for key, value := range claude.DefaultHeaders { + if value == "" { + continue + } + if req.Header.Get(key) == "" { + req.Header.Set(key, value) + } + } + if isStream && req.Header.Get("x-stainless-helper-method") == "" { + req.Header.Set("x-stainless-helper-method", 
"stream") + } +} + func truncateForLog(b []byte, maxBytes int) string { if maxBytes <= 0 { maxBytes = 2048 @@ -2246,7 +2578,7 @@ type streamingResult struct { clientDisconnect bool // 客户端是否在流式传输过程中断开 } -func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string) (*streamingResult, error) { +func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string) (*streamingResult, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -2339,6 +2671,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel + rewriteTools := account.IsOAuth() clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage for { @@ -2380,11 +2713,14 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http // Extract data from SSE line (supports both "data: " and "data:" formats) var data string if sseDataRe.MatchString(line) { - data = sseDataRe.ReplaceAllString(line, "") // 如果有模型映射,替换响应中的model字段 if needModelReplace { line = s.replaceModelInSSELine(line, mappedModel, originalModel) } + if rewriteTools { + line = s.replaceToolNamesInSSELine(line, toolNameMap) + } + data = sseDataRe.ReplaceAllString(line, "") } // 写入客户端(统一处理 data 行和非 data 行) @@ -2467,6 +2803,61 @@ func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) return "data: " + string(newData) } +func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { + switch v := value.(type) { + case map[string]any: + changed := false + if blockType, _ := v["type"].(string); blockType == "tool_use" { + if name, ok := v["name"].(string); ok { + mapped := normalizeToolNameForOpenCode(name, toolNameMap) + if mapped != name { + v["name"] = mapped + changed = true + } + } + } + for _, item := range v { + if rewriteToolNamesInValue(item, toolNameMap) { + changed = true + } + } + return changed + case []any: + changed := false + for _, item := range v { + if rewriteToolNamesInValue(item, toolNameMap) { + changed = true + } + } + return changed + default: + return false + } +} + +func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { + if !sseDataRe.MatchString(line) { + return line + } + data := sseDataRe.ReplaceAllString(line, "") + if data == "" || data == "[DONE]" { + return line + } + + var event map[string]any + if err := json.Unmarshal([]byte(data), &event); err != nil { + return line + } + if !rewriteToolNamesInValue(event, toolNameMap) { + return line + } + newData, err := json.Marshal(event) + if err != nil { + return line + } + return "data: " + string(newData) +} + func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { // 解析message_start获取input tokens(标准Claude API格式) var msgStart struct { @@ -2508,7 +2899,7 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } } -func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*ClaudeUsage, error) { +func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, toolNameMap map[string]string) 
(*ClaudeUsage, error) {
 	// Update the 5-hour window state
 	s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header)
 
@@ -2529,6 +2920,9 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h
 	if originalModel != mappedModel {
 		body = s.replaceModelInResponseBody(body, mappedModel, originalModel)
 	}
+	if account.IsOAuth() {
+		body = s.replaceToolNamesInResponseBody(body, toolNameMap)
+	}
 
 	responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders)
 
@@ -2566,6 +2960,24 @@ func (s *GatewayService) replaceModelInResponseBody(body []byte, fromModel, toMo
 	return newBody
 }
 
+func (s *GatewayService) replaceToolNamesInResponseBody(body []byte, toolNameMap map[string]string) []byte {
+	if len(body) == 0 {
+		return body
+	}
+	var resp map[string]any
+	if err := json.Unmarshal(body, &resp); err != nil {
+		return body
+	}
+	if !rewriteToolNamesInValue(resp, toolNameMap) {
+		return body
+	}
+	newBody, err := json.Marshal(resp)
+	if err != nil {
+		return body
+	}
+	return newBody
+}
+
 // RecordUsageInput holds the input parameters for recording usage
 type RecordUsageInput struct {
 	Result *ForwardResult
@@ -2729,6 +3141,11 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context,
 	body := parsed.Body
 	reqModel := parsed.Model
 
+	if account.IsOAuth() {
+		normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true}
+		body, reqModel, _ = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts)
+	}
+
 	// Antigravity accounts do not support count_tokens forwarding; return an empty value directly
 	if account.Platform == PlatformAntigravity {
 		c.JSON(http.StatusOK, gin.H{"input_tokens": 0})
@@ -2917,6 +3334,9 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con
 	if req.Header.Get("anthropic-version") == "" {
 		req.Header.Set("anthropic-version", "2023-06-01")
 	}
+	if tokenType == "oauth" {
+		applyClaudeOAuthHeaderDefaults(req, false)
+	}
 
 	// OAuth accounts: handle the anthropic-beta header
 	if tokenType == "oauth" {
diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go
index 1ffa8057..4ab1ab96 100644
--- a/backend/internal/service/identity_service.go
+++ b/backend/internal/service/identity_service.go
@@ -24,13 +24,13 @@ var (
 
 // Default fingerprint values (used when the client does not provide any)
 var defaultFingerprint = Fingerprint{
-	UserAgent:               "claude-cli/2.0.62 (external, cli)",
+	UserAgent:               "claude-cli/2.1.2 (external, cli)",
 	StainlessLang:           "js",
-	StainlessPackageVersion: "0.52.0",
+	StainlessPackageVersion: "0.70.0",
 	StainlessOS:             "Linux",
 	StainlessArch:           "x64",
 	StainlessRuntime:        "node",
-	StainlessRuntimeVersion: "v22.14.0",
+	StainlessRuntimeVersion: "v24.3.0",
 }
 
 // Fingerprint represents account fingerprint data
@@ -230,7 +230,7 @@ func generateUUIDFromSeed(seed string) string {
 }
 
 // parseUserAgentVersion parses the user-agent version number
-// e.g. claude-cli/2.0.62 -> (2, 0, 62)
+// e.g. claude-cli/2.1.2 -> (2, 1, 2)
 func parseUserAgentVersion(ua string) (major, minor, patch int, ok bool) {
 	// Match the xxx/x.y.z format
 	matches := userAgentVersionRegex.FindStringSubmatch(ua)
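[Aside, not part of the patch] `parseUserAgentVersion` extracts the three version components from headers like the bumped `claude-cli/2.1.2 (external, cli)`. The actual `userAgentVersionRegex` is not shown in this excerpt, so the pattern below is an assumption; the sketch only illustrates the xxx/x.y.z parse:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Assumed shape of userAgentVersionRegex; the real pattern lives elsewhere
// in identity_service.go and is not part of this excerpt.
var uaVersionRe = regexp.MustCompile(`^[^/]+/(\d+)\.(\d+)\.(\d+)`)

func parseVersion(ua string) (major, minor, patch int, ok bool) {
	m := uaVersionRe.FindStringSubmatch(ua)
	if len(m) != 4 {
		return 0, 0, 0, false
	}
	major, _ = strconv.Atoi(m[1])
	minor, _ = strconv.Atoi(m[2])
	patch, _ = strconv.Atoi(m[3])
	return major, minor, patch, true
}

func main() {
	fmt.Println(parseVersion("claude-cli/2.1.2 (external, cli)")) // 2 1 2 true
}

From c579439c1ea42636ed7e7447e133a98bedfa7091 Mon Sep 17 00:00:00 2001
From: cyhhao
Date: Thu, 15 Jan 2026 19:17:07 +0800
Subject: [PATCH 002/214] fix(gateway): distinguish the Claude Code OAuth
 adaptation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 backend/internal/handler/gateway_handler.go |   3 +
 backend/internal/pkg/claude/constants.go    |   4 +
 backend/internal/service/gateway_service.go | 110 +++++++++++++++-----
 3 files changed, 90 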
insertions(+), 27 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index b60618a8..91d590bf 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -707,6 +707,9 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { return } + // 检查是否为 Claude Code 客户端,设置到 context 中 + SetClaudeCodeClientContext(c, body) + setOpsRequestContext(c, "", false, body) parsedReq, err := service.ParseGatewayRequest(body) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 15144881..f60412c2 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -9,11 +9,15 @@ const ( BetaClaudeCode = "claude-code-20250219" BetaInterleavedThinking = "interleaved-thinking-2025-05-14" BetaFineGrainedToolStreaming = "fine-grained-tool-streaming-2025-05-14" + BetaTokenCounting = "token-counting-2024-11-01" ) // DefaultBetaHeader Claude Code 客户端默认的 anthropic-beta header const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming +// CountTokensBetaHeader count_tokens 请求使用的 anthropic-beta header +const CountTokensBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaTokenCounting + // HaikuBetaHeader Haiku 模型使用的 anthropic-beta header(不需要 claude-code beta) const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 1d29b3fd..904b5acd 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -49,6 +49,8 @@ var ( toolPrefixRe = regexp.MustCompile(`(?i)^(?:oc_|mcp_)`) toolNameBoundaryRe = regexp.MustCompile(`[^a-zA-Z0-9]+`) toolNameCamelRe = regexp.MustCompile(`([a-z0-9])([A-Z])`) + toolNameFieldRe = regexp.MustCompile(`"name"\s*:\s*"([^"]+)"`) + modelFieldRe = regexp.MustCompile(`"model"\s*:\s*"([^"]+)"`) claudeToolNameOverrides = map[string]string{ "bash": "Bash", @@ -1458,6 +1460,16 @@ func isClaudeCodeClient(userAgent string, metadataUserID string) bool { return claudeCliUserAgentRe.MatchString(userAgent) } +func isClaudeCodeRequest(ctx context.Context, c *gin.Context, parsed *ParsedRequest) bool { + if IsClaudeCodeClient(ctx) { + return true + } + if parsed == nil || c == nil { + return false + } + return isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) +} + // systemIncludesClaudeCodePrompt 检查 system 中是否已包含 Claude Code 提示词 // 使用前缀匹配支持多种变体(标准版、Agent SDK 版等) func systemIncludesClaudeCodePrompt(system any) bool { @@ -1720,11 +1732,13 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A originalModel := reqModel var toolNameMap map[string]string - if account.IsOAuth() { + isClaudeCode := isClaudeCodeRequest(ctx, c, parsed) + shouldMimicClaudeCode := account.IsOAuth() && !isClaudeCode + + if shouldMimicClaudeCode { // 智能注入 Claude Code 系统提示词(仅 OAuth/SetupToken 账号需要) // 条件:1) OAuth/SetupToken 账号 2) 不是 Claude Code 客户端 3) 不是 Haiku 模型 4) system 中还没有 Claude Code 提示词 - if !isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) && - !strings.Contains(strings.ToLower(reqModel), "haiku") && + if !strings.Contains(strings.ToLower(reqModel), "haiku") && !systemIncludesClaudeCodePrompt(parsed.System) { body = injectClaudeCodePrompt(body, parsed.System) } @@ -1774,7 +1788,7 @@ func (s *GatewayService) 
Forward(ctx context.Context, c *gin.Context, account *A retryStart := time.Now() for attempt := 1; attempt <= maxRetryAttempts; attempt++ { // 构建上游请求(每次重试需要重新构建,因为请求体需要重新读取) - upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream) + upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) if err != nil { return nil, err } @@ -1850,7 +1864,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // also downgrade tool_use/tool_result blocks to text. filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream) + retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -1881,7 +1895,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed { log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID) filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body) - retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream) + retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) if buildErr2 == nil { retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency) if retryErr2 == nil { @@ -2094,7 +2108,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A var firstTokenMs *int var clientDisconnect bool if reqStream { - streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap) + streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap, shouldMimicClaudeCode) if err != nil { if err.Error() == "have error in stream" { return nil, &UpstreamFailoverError{ @@ -2107,7 +2121,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A firstTokenMs = streamResult.firstTokenMs clientDisconnect = streamResult.clientDisconnect } else { - usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap) + usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap, shouldMimicClaudeCode) if err != nil { return nil, err } @@ -2124,7 +2138,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A }, nil } -func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool) (*http.Request, error) { +func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool, mimicClaudeCode bool) (*http.Request, error) { // 确定目标URL targetURL := claudeAPIURL if account.Type == AccountTypeAPIKey { @@ -2140,7 +2154,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // OAuth账号:应用统一指纹 var fingerprint 
*Fingerprint - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { // 1. 获取或创建指纹(包含随机生成的ClientID) fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err != nil { @@ -2193,12 +2207,12 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { applyClaudeOAuthHeaderDefaults(req, reqStream) } // 处理anthropic-beta header(OAuth账号需要特殊处理) - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) @@ -2578,7 +2592,7 @@ type streamingResult struct { clientDisconnect bool // 客户端是否在流式传输过程中断开 } -func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string) (*streamingResult, error) { +func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string, mimicClaudeCode bool) (*streamingResult, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -2671,7 +2685,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel - rewriteTools := account.IsOAuth() + rewriteTools := mimicClaudeCode clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage for { @@ -2835,6 +2849,37 @@ func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { } } +func replaceToolNamesInText(text string, toolNameMap map[string]string) string { + if text == "" { + return text + } + output := toolNameFieldRe.ReplaceAllStringFunc(text, func(match string) string { + submatches := toolNameFieldRe.FindStringSubmatch(match) + if len(submatches) < 2 { + return match + } + name := submatches[1] + mapped := normalizeToolNameForOpenCode(name, toolNameMap) + if mapped == name { + return match + } + return strings.Replace(match, name, mapped, 1) + }) + output = modelFieldRe.ReplaceAllStringFunc(output, func(match string) string { + submatches := modelFieldRe.FindStringSubmatch(match) + if len(submatches) < 2 { + return match + } + model := submatches[1] + mapped := claude.DenormalizeModelID(model) + if mapped == model { + return match + } + return strings.Replace(match, model, mapped, 1) + }) + return output +} + func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { if !sseDataRe.MatchString(line) { return line @@ -2846,7 +2891,11 @@ func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[ var event map[string]any if err := json.Unmarshal([]byte(data), &event); err != nil { - return line + replaced := replaceToolNamesInText(data, toolNameMap) + if replaced == data { + return line + } + return "data: " + replaced } if !rewriteToolNamesInValue(event, toolNameMap) { return line @@ -2899,7 +2948,7 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } } -func (s *GatewayService) 
handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, toolNameMap map[string]string) (*ClaudeUsage, error) { +func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, toolNameMap map[string]string, mimicClaudeCode bool) (*ClaudeUsage, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -2920,7 +2969,7 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h if originalModel != mappedModel { body = s.replaceModelInResponseBody(body, mappedModel, originalModel) } - if account.IsOAuth() { + if mimicClaudeCode { body = s.replaceToolNamesInResponseBody(body, toolNameMap) } @@ -2966,7 +3015,11 @@ func (s *GatewayService) replaceToolNamesInResponseBody(body []byte, toolNameMap } var resp map[string]any if err := json.Unmarshal(body, &resp); err != nil { - return body + replaced := replaceToolNamesInText(string(body), toolNameMap) + if replaced == string(body) { + return body + } + return []byte(replaced) } if !rewriteToolNamesInValue(resp, toolNameMap) { return body @@ -3141,7 +3194,10 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, body := parsed.Body reqModel := parsed.Model - if account.IsOAuth() { + isClaudeCode := isClaudeCodeRequest(ctx, c, parsed) + shouldMimicClaudeCode := account.IsOAuth() && !isClaudeCode + + if shouldMimicClaudeCode { normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true} body, reqModel, _ = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts) } @@ -3172,7 +3228,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // 构建上游请求 - upstreamReq, err := s.buildCountTokensRequest(ctx, c, account, body, token, tokenType, reqModel) + upstreamReq, err := s.buildCountTokensRequest(ctx, c, account, body, token, tokenType, reqModel, shouldMimicClaudeCode) if err != nil { s.countTokensError(c, http.StatusInternalServerError, "api_error", "Failed to build request") return err @@ -3205,7 +3261,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, log.Printf("Account %d: detected thinking block signature error on count_tokens, retrying with filtered thinking blocks", account.ID) filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) + retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, shouldMimicClaudeCode) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -3270,7 +3326,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // buildCountTokensRequest 构建 count_tokens 上游请求 -func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) { +func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, mimicClaudeCode bool) (*http.Request, error) { // 确定目标 URL targetURL := claudeAPICountTokensURL if account.Type == AccountTypeAPIKey { @@ -3285,7 +3341,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } 
// OAuth 账号:应用统一指纹和重写 userID - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err == nil { accountUUID := account.GetExtraString("account_uuid") @@ -3320,7 +3376,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用指纹到请求头 - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if fp != nil { s.identityService.ApplyFingerprint(req, fp) @@ -3334,13 +3390,13 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { applyClaudeOAuthHeaderDefaults(req, false) } // OAuth 账号:处理 anthropic-beta header - if tokenType == "oauth" { - req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + if tokenType == "oauth" && mimicClaudeCode { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:与 messages 同步的按需 beta 注入(默认关闭) if requestNeedsBetaFeatures(body) { From 98b65e67f21189f441f92dec88ed40b3ba7e8561 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 15 Jan 2026 21:42:13 +0800 Subject: [PATCH 003/214] fix(gateway): avoid injecting invalid SSE on client cancel --- .../service/openai_gateway_service.go | 6 +++ .../service/openai_gateway_service_test.go | 37 +++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 04a90fdd..d49be282 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -1064,6 +1064,12 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil } if ev.err != nil { + // 客户端断开/取消请求时,上游读取往往会返回 context canceled。 + // /v1/responses 的 SSE 事件必须符合 OpenAI 协议;这里不注入自定义 error event,避免下游 SDK 解析失败。 + if errors.Is(ev.err, context.Canceled) || errors.Is(ev.err, context.DeadlineExceeded) { + log.Printf("Context canceled during streaming, returning collected usage") + return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil + } if errors.Is(ev.err, bufio.ErrTooLong) { log.Printf("SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, ev.err) sendErrorEvent("response_too_large") diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 42b88b7d..ead6e143 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -33,6 +33,11 @@ type stubConcurrencyCache struct { ConcurrencyCache } +type cancelReadCloser struct{} + +func (c cancelReadCloser) Read(p []byte) (int, error) { return 0, context.Canceled } +func (c cancelReadCloser) Close() error { return nil } + func (c stubConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { return true, nil } @@ 
-174,6 +179,38 @@ func TestOpenAIStreamingTimeout(t *testing.T) { } } +func TestOpenAIStreamingContextCanceledDoesNotInjectErrorEvent(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + StreamDataIntervalTimeout: 0, + StreamKeepaliveInterval: 0, + MaxLineSize: defaultMaxLineSize, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + c.Request = httptest.NewRequest(http.MethodPost, "/", nil).WithContext(ctx) + + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: cancelReadCloser{}, + Header: http.Header{}, + } + + _, err := svc.handleStreamingResponse(c.Request.Context(), resp, c, &Account{ID: 1}, time.Now(), "model", "model") + if err != nil { + t.Fatalf("expected nil error, got %v", err) + } + if strings.Contains(rec.Body.String(), "event: error") || strings.Contains(rec.Body.String(), "stream_read_error") { + t.Fatalf("expected no injected SSE error event, got %q", rec.Body.String()) + } +} + func TestOpenAIStreamingTooLong(t *testing.T) { gin.SetMode(gin.TestMode) cfg := &config.Config{ From c11f14f3a030c30846183704ccd6193785899bd4 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 15 Jan 2026 21:51:14 +0800 Subject: [PATCH 004/214] fix(gateway): drain upstream after client disconnect --- .../service/openai_gateway_service.go | 43 ++++++++++---- .../service/openai_gateway_service_test.go | 59 +++++++++++++++++++ 2 files changed, 91 insertions(+), 11 deletions(-) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index d49be282..fb811e9e 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -1046,8 +1046,9 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp // 仅发送一次错误事件,避免多次写入导致协议混乱(写失败时尽力通知客户端) errorEventSent := false + clientDisconnected := false // 客户端断开后继续 drain 上游以收集 usage sendErrorEvent := func(reason string) { - if errorEventSent { + if errorEventSent || clientDisconnected { return } errorEventSent = true @@ -1070,6 +1071,11 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp log.Printf("Context canceled during streaming, returning collected usage") return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil } + // 客户端已断开时,上游出错仅影响体验,不影响计费;返回已收集 usage + if clientDisconnected { + log.Printf("Upstream read error after client disconnect: %v, returning collected usage", ev.err) + return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil + } if errors.Is(ev.err, bufio.ErrTooLong) { log.Printf("SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, ev.err) sendErrorEvent("response_too_large") @@ -1091,12 +1097,15 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp line = s.replaceModelInSSELine(line, mappedModel, originalModel) } - // Forward line - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { - sendErrorEvent("write_failed") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err + // 写入客户端(客户端断开后继续 drain 上游) + if !clientDisconnected { + if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { + clientDisconnected = true + log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") + } else { + flusher.Flush() + } } - flusher.Flush() // 
Record first token time if firstTokenMs == nil && data != "" && data != "[DONE]" { @@ -1106,11 +1115,14 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp s.parseSSEUsage(data, usage) } else { // Forward non-data lines as-is - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { - sendErrorEvent("write_failed") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err + if !clientDisconnected { + if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { + clientDisconnected = true + log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") + } else { + flusher.Flush() + } } - flusher.Flush() } case <-intervalCh: @@ -1118,6 +1130,10 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp if time.Since(lastRead) < streamInterval { continue } + if clientDisconnected { + log.Printf("Upstream timeout after client disconnect, returning collected usage") + return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil + } log.Printf("Stream data interval timeout: account=%d model=%s interval=%s", account.ID, originalModel, streamInterval) // 处理流超时,可能标记账户为临时不可调度或错误状态 if s.rateLimitService != nil { @@ -1127,11 +1143,16 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") case <-keepaliveCh: + if clientDisconnected { + continue + } if time.Since(lastDataAt) < keepaliveInterval { continue } if _, err := fmt.Fprint(w, ":\n\n"); err != nil { - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err + clientDisconnected = true + log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") + continue } flusher.Flush() } diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index ead6e143..3ec37544 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -38,6 +38,20 @@ type cancelReadCloser struct{} func (c cancelReadCloser) Read(p []byte) (int, error) { return 0, context.Canceled } func (c cancelReadCloser) Close() error { return nil } +type failingGinWriter struct { + gin.ResponseWriter + failAfter int + writes int +} + +func (w *failingGinWriter) Write(p []byte) (int, error) { + if w.writes >= w.failAfter { + return 0, errors.New("write failed") + } + w.writes++ + return w.ResponseWriter.Write(p) +} + func (c stubConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { return true, nil } @@ -211,6 +225,51 @@ func TestOpenAIStreamingContextCanceledDoesNotInjectErrorEvent(t *testing.T) { } } +func TestOpenAIStreamingClientDisconnectDrainsUpstreamUsage(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + StreamDataIntervalTimeout: 0, + StreamKeepaliveInterval: 0, + MaxLineSize: defaultMaxLineSize, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + c.Writer = &failingGinWriter{ResponseWriter: c.Writer, failAfter: 0} + + pr, pw := io.Pipe() + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: pr, + Header: http.Header{}, + } + + go func() { + defer func() { _ = pw.Close() 
}()
+		_, _ = pw.Write([]byte("data: {\"type\":\"response.in_progress\",\"response\":{}}\n\n"))
+		_, _ = pw.Write([]byte("data: {\"type\":\"response.completed\",\"response\":{\"usage\":{\"input_tokens\":3,\"output_tokens\":5,\"input_tokens_details\":{\"cached_tokens\":1}}}}\n\n"))
+	}()
+
+	result, err := svc.handleStreamingResponse(c.Request.Context(), resp, c, &Account{ID: 1}, time.Now(), "model", "model")
+	_ = pr.Close()
+	if err != nil {
+		t.Fatalf("expected nil error, got %v", err)
+	}
+	if result == nil || result.usage == nil {
+		t.Fatalf("expected usage result")
+	}
+	if result.usage.InputTokens != 3 || result.usage.OutputTokens != 5 || result.usage.CacheReadInputTokens != 1 {
+		t.Fatalf("unexpected usage: %+v", *result.usage)
+	}
+	if strings.Contains(rec.Body.String(), "event: error") || strings.Contains(rec.Body.String(), "write_failed") {
+		t.Fatalf("expected no injected SSE error event, got %q", rec.Body.String())
+	}
+}
+
 func TestOpenAIStreamingTooLong(t *testing.T) {
 	gin.SetMode(gin.TestMode)
 	cfg := &config.Config{

From 65fd0d15ae0f5b1b454d27a02e7df3e8b5670b2d Mon Sep 17 00:00:00 2001
From: cyhhao
Date: Fri, 16 Jan 2026 00:41:29 +0800
Subject: [PATCH 005/214] fix(gateway): complete non-Claude Code OAuth
 compatibility
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 backend/internal/pkg/claude/constants.go    |   6 +
 backend/internal/service/account.go         |  16 ++
 backend/internal/service/gateway_service.go | 239 +++++++++++++++++---
 3 files changed, 232 insertions(+), 29 deletions(-)

diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go
index f60412c2..0c6e9b4c 100644
--- a/backend/internal/pkg/claude/constants.go
+++ b/backend/internal/pkg/claude/constants.go
@@ -15,6 +15,12 @@ const (
 // DefaultBetaHeader is the Claude Code client's default anthropic-beta header
 const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming
 
+// MessageBetaHeaderNoTools is the /v1/messages beta header when no tools are present
+const MessageBetaHeaderNoTools = BetaOAuth + "," + BetaInterleavedThinking
+
+// MessageBetaHeaderWithTools is the /v1/messages beta header when tools are present
+const MessageBetaHeaderWithTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking
+
 // CountTokensBetaHeader is the anthropic-beta header used by count_tokens requests
 const CountTokensBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaTokenCounting
 
diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go
index cfce9bfa..435eecd9 100644
--- a/backend/internal/service/account.go
+++ b/backend/internal/service/account.go
@@ -364,6 +364,22 @@ func (a *Account) GetExtraString(key string) string {
 	return ""
 }
 
+func (a *Account) GetClaudeUserID() string {
+	if v := strings.TrimSpace(a.GetExtraString("claude_user_id")); v != "" {
+		return v
+	}
+	if v := strings.TrimSpace(a.GetExtraString("anthropic_user_id")); v != "" {
+		return v
+	}
+	if v := strings.TrimSpace(a.GetCredential("claude_user_id")); v != "" {
+		return v
+	}
+	if v := strings.TrimSpace(a.GetCredential("anthropic_user_id")); v != "" {
+		return v
+	}
+	return ""
+}
+
 func (a *Account) IsCustomErrorCodesEnabled() bool {
 	if a.Type != AccountTypeAPIKey || a.Credentials == nil {
 		return false
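[Aside, not part of the patch] `GetClaudeUserID` is a first-non-empty lookup across two extras keys and two credential keys, trimming whitespace at each step. A tiny sketch of the same pattern in isolation (the plain maps stand in for the Account's extras/credentials stores; values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// firstNonEmpty returns the first lookup result that is non-blank after
// trimming, mirroring the fallback order in GetClaudeUserID.
func firstNonEmpty(lookups ...func() string) string {
	for _, l := range lookups {
		if v := strings.TrimSpace(l()); v != "" {
			return v
		}
	}
	return ""
}

func main() {
	extras := map[string]string{"anthropic_user_id": " u-123 "} // hypothetical stored value
	creds := map[string]string{}
	id := firstNonEmpty(
		func() string { return extras["claude_user_id"] },
		func() string { return extras["anthropic_user_id"] },
		func() string { return creds["claude_user_id"] },
		func() string { return creds["anthropic_user_id"] },
	)
	fmt.Println(id) // u-123
}

diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go
index 904b5acd..790d9fa2 100644
--- 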
a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -51,6 +51,9 @@ var ( toolNameCamelRe = regexp.MustCompile(`([a-z0-9])([A-Z])`) toolNameFieldRe = regexp.MustCompile(`"name"\s*:\s*"([^"]+)"`) modelFieldRe = regexp.MustCompile(`"model"\s*:\s*"([^"]+)"`) + toolDescAbsPathRe = regexp.MustCompile(`/\/?(?:home|Users|tmp|var|opt|usr|etc)\/[^\s,\)"'\]]+`) + toolDescWinPathRe = regexp.MustCompile(`(?i)[A-Z]:\\[^\s,\)"'\]]+`) + opencodeTextRe = regexp.MustCompile(`(?i)opencode`) claudeToolNameOverrides = map[string]string{ "bash": "Bash", @@ -451,6 +454,22 @@ func normalizeToolNameForClaude(name string, cache map[string]string) string { } func normalizeToolNameForOpenCode(name string, cache map[string]string) string { + if name == "" { + return name + } + stripped := stripToolPrefix(name) + if cache != nil { + if mapped, ok := cache[stripped]; ok { + return mapped + } + } + if mapped, ok := openCodeToolOverrides[stripped]; ok { + return mapped + } + return toSnakeCase(stripped) +} + +func normalizeParamNameForOpenCode(name string, cache map[string]string) string { if name == "" { return name } @@ -459,10 +478,63 @@ func normalizeToolNameForOpenCode(name string, cache map[string]string) string { return mapped } } - if mapped, ok := openCodeToolOverrides[name]; ok { - return mapped + return name +} + +func sanitizeOpenCodeText(text string) string { + if text == "" { + return text + } + text = strings.ReplaceAll(text, "OpenCode", "Claude Code") + text = opencodeTextRe.ReplaceAllString(text, "Claude") + return text +} + +func sanitizeToolDescription(description string) string { + if description == "" { + return description + } + description = toolDescAbsPathRe.ReplaceAllString(description, "[path]") + description = toolDescWinPathRe.ReplaceAllString(description, "[path]") + return sanitizeOpenCodeText(description) +} + +func normalizeToolInputSchema(inputSchema any, cache map[string]string) { + schema, ok := inputSchema.(map[string]any) + if !ok { + return + } + properties, ok := schema["properties"].(map[string]any) + if !ok { + return + } + + newProperties := make(map[string]any, len(properties)) + for key, value := range properties { + snakeKey := toSnakeCase(key) + newProperties[snakeKey] = value + if snakeKey != key && cache != nil { + cache[snakeKey] = key + } + } + schema["properties"] = newProperties + + if required, ok := schema["required"].([]any); ok { + newRequired := make([]any, 0, len(required)) + for _, item := range required { + name, ok := item.(string) + if !ok { + newRequired = append(newRequired, item) + continue + } + snakeName := toSnakeCase(name) + newRequired = append(newRequired, snakeName) + if snakeName != name && cache != nil { + cache[snakeName] = name + } + } + schema["required"] = newRequired } - return toSnakeCase(name) } func stripCacheControlFromSystemBlocks(system any) bool { @@ -479,9 +551,6 @@ func stripCacheControlFromSystemBlocks(system any) bool { if _, exists := block["cache_control"]; !exists { continue } - if text, ok := block["text"].(string); ok && text == claudeCodeSystemPrompt { - continue - } delete(block, "cache_control") changed = true } @@ -499,6 +568,34 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu toolNameMap := make(map[string]string) + if system, ok := req["system"]; ok { + switch v := system.(type) { + case string: + sanitized := sanitizeOpenCodeText(v) + if sanitized != v { + req["system"] = sanitized + } + case []any: + for _, item := range v { + block, ok := 
item.(map[string]any) + if !ok { + continue + } + if blockType, _ := block["type"].(string); blockType != "text" { + continue + } + text, ok := block["text"].(string) + if !ok || text == "" { + continue + } + sanitized := sanitizeOpenCodeText(text) + if sanitized != text { + block["text"] = sanitized + } + } + } + } + if rawModel, ok := req["model"].(string); ok { normalized := claude.NormalizeModelID(rawModel) if normalized != rawModel { @@ -521,6 +618,15 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu toolMap["name"] = normalized } } + if desc, ok := toolMap["description"].(string); ok { + sanitized := sanitizeToolDescription(desc) + if sanitized != desc { + toolMap["description"] = sanitized + } + } + if schema, ok := toolMap["input_schema"]; ok { + normalizeToolInputSchema(schema, toolNameMap) + } tools[idx] = toolMap } req["tools"] = tools @@ -532,13 +638,15 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu normalized = name } if toolMap, ok := value.(map[string]any); ok { - if toolName, ok := toolMap["name"].(string); ok { - mappedName := normalizeToolNameForClaude(toolName, toolNameMap) - if mappedName != "" && mappedName != toolName { - toolMap["name"] = mappedName + toolMap["name"] = normalized + if desc, ok := toolMap["description"].(string); ok { + sanitized := sanitizeToolDescription(desc) + if sanitized != desc { + toolMap["description"] = sanitized } - } else if normalized != name { - toolMap["name"] = normalized + } + if schema, ok := toolMap["input_schema"]; ok { + normalizeToolInputSchema(schema, toolNameMap) } normalizedTools[normalized] = toolMap continue @@ -611,7 +719,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu } func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account *Account, fp *Fingerprint) string { - if parsed == nil || fp == nil || fp.ClientID == "" { + if parsed == nil || account == nil { return "" } if parsed.MetadataUserID != "" { @@ -621,13 +729,22 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account if accountUUID == "" { return "" } + + userID := strings.TrimSpace(account.GetClaudeUserID()) + if userID == "" && fp != nil { + userID = fp.ClientID + } + if userID == "" { + return "" + } + sessionHash := s.GenerateSessionHash(parsed) sessionID := uuid.NewString() if sessionHash != "" { seed := fmt.Sprintf("%d::%s", account.ID, sessionHash) sessionID = generateSessionUUID(seed) } - return fmt.Sprintf("user_%s_account_%s_session_%s", fp.ClientID, accountUUID, sessionID) + return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) } func generateSessionUUID(seed string) string { @@ -2213,7 +2330,11 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" && mimicClaudeCode { - req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + if requestHasTools(body) { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + } else { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + } } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) if requestNeedsBetaFeatures(body) { @@ -2284,6 +2405,20 @@ func requestNeedsBetaFeatures(body []byte) bool { return false } +func requestHasTools(body []byte) bool { + tools := 
gjson.GetBytes(body, "tools") + if !tools.Exists() { + return false + } + if tools.IsArray() { + return len(tools.Array()) > 0 + } + if tools.IsObject() { + return len(tools.Map()) > 0 + } + return false +} + func defaultAPIKeyBetaHeader(body []byte) string { modelID := gjson.GetBytes(body, "model").String() if strings.Contains(strings.ToLower(modelID), "haiku") { @@ -2817,6 +2952,45 @@ func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) return "data: " + string(newData) } +func rewriteParamKeysInValue(value any, cache map[string]string) (any, bool) { + switch v := value.(type) { + case map[string]any: + changed := false + rewritten := make(map[string]any, len(v)) + for key, item := range v { + newKey := normalizeParamNameForOpenCode(key, cache) + newItem, childChanged := rewriteParamKeysInValue(item, cache) + if childChanged { + changed = true + } + if newKey != key { + changed = true + } + rewritten[newKey] = newItem + } + if !changed { + return value, false + } + return rewritten, true + case []any: + changed := false + rewritten := make([]any, len(v)) + for idx, item := range v { + newItem, childChanged := rewriteParamKeysInValue(item, cache) + if childChanged { + changed = true + } + rewritten[idx] = newItem + } + if !changed { + return value, false + } + return rewritten, true + default: + return value, false + } +} + func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { switch v := value.(type) { case map[string]any: @@ -2829,6 +3003,15 @@ func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { changed = true } } + if input, ok := v["input"].(map[string]any); ok { + rewrittenInput, inputChanged := rewriteParamKeysInValue(input, toolNameMap) + if inputChanged { + if m, ok := rewrittenInput.(map[string]any); ok { + v["input"] = m + changed = true + } + } + } } for _, item := range v { if rewriteToolNamesInValue(item, toolNameMap) { @@ -2877,6 +3060,15 @@ func replaceToolNamesInText(text string, toolNameMap map[string]string) string { } return strings.Replace(match, model, mapped, 1) }) + + for mapped, original := range toolNameMap { + if mapped == "" || original == "" || mapped == original { + continue + } + output = strings.ReplaceAll(output, "\""+mapped+"\":", "\""+original+"\":") + output = strings.ReplaceAll(output, "\\\""+mapped+"\\\":", "\\\""+original+"\\\":") + } + return output } @@ -2889,22 +3081,11 @@ func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[ return line } - var event map[string]any - if err := json.Unmarshal([]byte(data), &event); err != nil { - replaced := replaceToolNamesInText(data, toolNameMap) - if replaced == data { - return line - } - return "data: " + replaced - } - if !rewriteToolNamesInValue(event, toolNameMap) { + replaced := replaceToolNamesInText(data, toolNameMap) + if replaced == data { return line } - newData, err := json.Marshal(event) - if err != nil { - return line - } - return "data: " + string(newData) + return "data: " + replaced } func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { From bd854e1750e568c4a02b3a276e68bcd6336f5368 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Fri, 16 Jan 2026 23:15:52 +0800 Subject: [PATCH 006/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20Claude=20Cod?= =?UTF-8?q?e=20OAuth=20=E8=A1=A5=E9=BD=90=20oauth=20beta?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 34 ++++++++++++++++----- 1 file changed, 27 
insertions(+), 7 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 790d9fa2..aa811bf5 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2328,12 +2328,19 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex applyClaudeOAuthHeaderDefaults(req, reqStream) } - // 处理anthropic-beta header(OAuth账号需要特殊处理) - if tokenType == "oauth" && mimicClaudeCode { - if requestHasTools(body) { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + // 处理 anthropic-beta header(OAuth 账号需要包含 oauth beta) + if tokenType == "oauth" { + if mimicClaudeCode { + // 非 Claude Code 客户端:按 Claude Code 规则生成 beta header + if requestHasTools(body) { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + } else { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + } } else { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta + clientBetaHeader := req.Header.Get("anthropic-beta") + req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, clientBetaHeader)) } } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) @@ -3576,8 +3583,21 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:处理 anthropic-beta header - if tokenType == "oauth" && mimicClaudeCode { - req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + if tokenType == "oauth" { + if mimicClaudeCode { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + } else { + clientBetaHeader := req.Header.Get("anthropic-beta") + if clientBetaHeader == "" { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + } else { + beta := s.getBetaHeader(modelID, clientBetaHeader) + if !strings.Contains(beta, claude.BetaTokenCounting) { + beta = beta + "," + claude.BetaTokenCounting + } + req.Header.Set("anthropic-beta", beta) + } + } } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:与 messages 同步的按需 beta 注入(默认关闭) if requestNeedsBetaFeatures(body) { From 2a7d04fec4f452bc20b73ab0fa04da9ef6fd7870 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 15 Jan 2026 18:54:42 +0800 Subject: [PATCH 007/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E5=AF=B9?= =?UTF-8?q?=E9=BD=90=20Claude=20OAuth=20=E8=AF=B7=E6=B1=82=E9=80=82?= =?UTF-8?q?=E9=85=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/pkg/claude/constants.go | 44 +- backend/internal/service/gateway_service.go | 455 ++++++++++++++++++- backend/internal/service/identity_service.go | 8 +- 3 files changed, 481 insertions(+), 26 deletions(-) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index d1a56a84..15144881 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -25,15 +25,15 @@ const APIKeyHaikuBetaHeader = BetaInterleavedThinking // DefaultHeaders 是 Claude Code 客户端默认请求头。 var DefaultHeaders = map[string]string{ - "User-Agent": "claude-cli/2.0.62 (external, cli)", + "User-Agent": "claude-cli/2.1.2 (external, cli)", "X-Stainless-Lang": "js", - "X-Stainless-Package-Version": "0.52.0", + "X-Stainless-Package-Version": "0.70.0", "X-Stainless-OS": "Linux", "X-Stainless-Arch": "x64", 
"X-Stainless-Runtime": "node", - "X-Stainless-Runtime-Version": "v22.14.0", + "X-Stainless-Runtime-Version": "v24.3.0", "X-Stainless-Retry-Count": "0", - "X-Stainless-Timeout": "60", + "X-Stainless-Timeout": "600", "X-App": "cli", "Anthropic-Dangerous-Direct-Browser-Access": "true", } @@ -79,3 +79,39 @@ func DefaultModelIDs() []string { // DefaultTestModel 测试时使用的默认模型 const DefaultTestModel = "claude-sonnet-4-5-20250929" + +// ModelIDOverrides Claude OAuth 请求需要的模型 ID 映射 +var ModelIDOverrides = map[string]string{ + "claude-sonnet-4-5": "claude-sonnet-4-5-20250929", + "claude-opus-4-5": "claude-opus-4-5-20251101", + "claude-haiku-4-5": "claude-haiku-4-5-20251001", +} + +// ModelIDReverseOverrides 用于将上游模型 ID 还原为短名 +var ModelIDReverseOverrides = map[string]string{ + "claude-sonnet-4-5-20250929": "claude-sonnet-4-5", + "claude-opus-4-5-20251101": "claude-opus-4-5", + "claude-haiku-4-5-20251001": "claude-haiku-4-5", +} + +// NormalizeModelID 根据 Claude OAuth 规则映射模型 +func NormalizeModelID(id string) string { + if id == "" { + return id + } + if mapped, ok := ModelIDOverrides[id]; ok { + return mapped + } + return id +} + +// DenormalizeModelID 将上游模型 ID 转换为短名 +func DenormalizeModelID(id string) string { + if id == "" { + return id + } + if mapped, ok := ModelIDReverseOverrides[id]; ok { + return mapped + } + return id +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 1e3221d3..899a0fc5 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -18,12 +18,14 @@ import ( "strings" "sync/atomic" "time" + "unicode" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" + "github.com/google/uuid" "github.com/tidwall/gjson" "github.com/tidwall/sjson" @@ -60,6 +62,36 @@ var ( sseDataRe = regexp.MustCompile(`^data:\s*`) sessionIDRegex = regexp.MustCompile(`session_([a-f0-9-]{36})`) claudeCliUserAgentRe = regexp.MustCompile(`^claude-cli/\d+\.\d+\.\d+`) + toolPrefixRe = regexp.MustCompile(`(?i)^(?:oc_|mcp_)`) + toolNameBoundaryRe = regexp.MustCompile(`[^a-zA-Z0-9]+`) + toolNameCamelRe = regexp.MustCompile(`([a-z0-9])([A-Z])`) + + claudeToolNameOverrides = map[string]string{ + "bash": "Bash", + "read": "Read", + "edit": "Edit", + "write": "Write", + "task": "Task", + "glob": "Glob", + "grep": "Grep", + "webfetch": "WebFetch", + "websearch": "WebSearch", + "todowrite": "TodoWrite", + "question": "AskUserQuestion", + } + openCodeToolOverrides = map[string]string{ + "Bash": "bash", + "Read": "read", + "Edit": "edit", + "Write": "write", + "Task": "task", + "Glob": "glob", + "Grep": "grep", + "WebFetch": "webfetch", + "WebSearch": "websearch", + "TodoWrite": "todowrite", + "AskUserQuestion": "question", + } // claudeCodePromptPrefixes 用于检测 Claude Code 系统提示词的前缀列表 // 支持多种变体:标准版、Agent SDK 版、Explore Agent 版、Compact 版等 @@ -365,6 +397,268 @@ func (s *GatewayService) replaceModelInBody(body []byte, newModel string) []byte return newBody } +type claudeOAuthNormalizeOptions struct { + injectMetadata bool + metadataUserID string + stripSystemCacheControl bool +} + +func stripToolPrefix(value string) string { + if value == "" { + return value + } + return toolPrefixRe.ReplaceAllString(value, "") +} + +func toPascalCase(value string) string { + if value == "" { + return value + } + normalized := 
toolNameBoundaryRe.ReplaceAllString(value, " ") + tokens := make([]string, 0) + for _, token := range strings.Fields(normalized) { + expanded := toolNameCamelRe.ReplaceAllString(token, "$1 $2") + parts := strings.Fields(expanded) + if len(parts) > 0 { + tokens = append(tokens, parts...) + } + } + if len(tokens) == 0 { + return value + } + var builder strings.Builder + for _, token := range tokens { + lower := strings.ToLower(token) + if lower == "" { + continue + } + runes := []rune(lower) + runes[0] = unicode.ToUpper(runes[0]) + builder.WriteString(string(runes)) + } + return builder.String() +} + +func toSnakeCase(value string) string { + if value == "" { + return value + } + output := toolNameCamelRe.ReplaceAllString(value, "$1_$2") + output = toolNameBoundaryRe.ReplaceAllString(output, "_") + output = strings.Trim(output, "_") + return strings.ToLower(output) +} + +func normalizeToolNameForClaude(name string, cache map[string]string) string { + if name == "" { + return name + } + stripped := stripToolPrefix(name) + mapped, ok := claudeToolNameOverrides[strings.ToLower(stripped)] + if !ok { + mapped = toPascalCase(stripped) + } + if mapped != "" && cache != nil && mapped != stripped { + cache[mapped] = stripped + } + if mapped == "" { + return stripped + } + return mapped +} + +func normalizeToolNameForOpenCode(name string, cache map[string]string) string { + if name == "" { + return name + } + if cache != nil { + if mapped, ok := cache[name]; ok { + return mapped + } + } + if mapped, ok := openCodeToolOverrides[name]; ok { + return mapped + } + return toSnakeCase(name) +} + +func stripCacheControlFromSystemBlocks(system any) bool { + blocks, ok := system.([]any) + if !ok { + return false + } + changed := false + for _, item := range blocks { + block, ok := item.(map[string]any) + if !ok { + continue + } + if _, exists := block["cache_control"]; !exists { + continue + } + if text, ok := block["text"].(string); ok && text == claudeCodeSystemPrompt { + continue + } + delete(block, "cache_control") + changed = true + } + return changed +} + +func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAuthNormalizeOptions) ([]byte, string, map[string]string) { + if len(body) == 0 { + return body, modelID, nil + } + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return body, modelID, nil + } + + toolNameMap := make(map[string]string) + + if rawModel, ok := req["model"].(string); ok { + normalized := claude.NormalizeModelID(rawModel) + if normalized != rawModel { + req["model"] = normalized + modelID = normalized + } + } + + if rawTools, exists := req["tools"]; exists { + switch tools := rawTools.(type) { + case []any: + for idx, tool := range tools { + toolMap, ok := tool.(map[string]any) + if !ok { + continue + } + if name, ok := toolMap["name"].(string); ok { + normalized := normalizeToolNameForClaude(name, toolNameMap) + if normalized != "" && normalized != name { + toolMap["name"] = normalized + } + } + tools[idx] = toolMap + } + req["tools"] = tools + case map[string]any: + normalizedTools := make(map[string]any, len(tools)) + for name, value := range tools { + normalized := normalizeToolNameForClaude(name, toolNameMap) + if normalized == "" { + normalized = name + } + if toolMap, ok := value.(map[string]any); ok { + if toolName, ok := toolMap["name"].(string); ok { + mappedName := normalizeToolNameForClaude(toolName, toolNameMap) + if mappedName != "" && mappedName != toolName { + toolMap["name"] = mappedName + } + } else if normalized 
!= name { + toolMap["name"] = normalized + } + normalizedTools[normalized] = toolMap + continue + } + normalizedTools[normalized] = value + } + req["tools"] = normalizedTools + } + } else { + req["tools"] = []any{} + } + + if messages, ok := req["messages"].([]any); ok { + for _, msg := range messages { + msgMap, ok := msg.(map[string]any) + if !ok { + continue + } + content, ok := msgMap["content"].([]any) + if !ok { + continue + } + for _, block := range content { + blockMap, ok := block.(map[string]any) + if !ok { + continue + } + if blockType, _ := blockMap["type"].(string); blockType != "tool_use" { + continue + } + if name, ok := blockMap["name"].(string); ok { + normalized := normalizeToolNameForClaude(name, toolNameMap) + if normalized != "" && normalized != name { + blockMap["name"] = normalized + } + } + } + } + } + + if opts.stripSystemCacheControl { + if system, ok := req["system"]; ok { + _ = stripCacheControlFromSystemBlocks(system) + } + } + + if opts.injectMetadata && opts.metadataUserID != "" { + metadata, ok := req["metadata"].(map[string]any) + if !ok { + metadata = map[string]any{} + req["metadata"] = metadata + } + if existing, ok := metadata["user_id"].(string); !ok || existing == "" { + metadata["user_id"] = opts.metadataUserID + } + } + + if _, ok := req["temperature"]; ok { + delete(req, "temperature") + } + if _, ok := req["tool_choice"]; ok { + delete(req, "tool_choice") + } + + newBody, err := json.Marshal(req) + if err != nil { + return body, modelID, toolNameMap + } + return newBody, modelID, toolNameMap +} + +func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account *Account, fp *Fingerprint) string { + if parsed == nil || fp == nil || fp.ClientID == "" { + return "" + } + if parsed.MetadataUserID != "" { + return "" + } + accountUUID := account.GetExtraString("account_uuid") + if accountUUID == "" { + return "" + } + sessionHash := s.GenerateSessionHash(parsed) + sessionID := uuid.NewString() + if sessionHash != "" { + seed := fmt.Sprintf("%d::%s", account.ID, sessionHash) + sessionID = generateSessionUUID(seed) + } + return fmt.Sprintf("user_%s_account_%s_session_%s", fp.ClientID, accountUUID, sessionID) +} + +func generateSessionUUID(seed string) string { + if seed == "" { + return uuid.NewString() + } + hash := sha256.Sum256([]byte(seed)) + bytes := hash[:16] + bytes[6] = (bytes[6] & 0x0f) | 0x40 + bytes[8] = (bytes[8] & 0x3f) | 0x80 + return fmt.Sprintf("%x-%x-%x-%x-%x", + bytes[0:4], bytes[4:6], bytes[6:8], bytes[8:10], bytes[10:16]) +} + // SelectAccount 选择账号(粘性会话+优先级) func (s *GatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) { return s.SelectAccountForModel(ctx, groupID, sessionHash, "") @@ -1906,21 +2200,36 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A body := parsed.Body reqModel := parsed.Model reqStream := parsed.Stream + originalModel := reqModel + var toolNameMap map[string]string - // 智能注入 Claude Code 系统提示词(仅 OAuth/SetupToken 账号需要) - // 条件:1) OAuth/SetupToken 账号 2) 不是 Claude Code 客户端 3) 不是 Haiku 模型 4) system 中还没有 Claude Code 提示词 - if account.IsOAuth() && - !isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) && - !strings.Contains(strings.ToLower(reqModel), "haiku") && - !systemIncludesClaudeCodePrompt(parsed.System) { - body = injectClaudeCodePrompt(body, parsed.System) + if account.IsOAuth() { + // 智能注入 Claude Code 系统提示词(仅 OAuth/SetupToken 账号需要) + // 条件:1) OAuth/SetupToken 账号 2) 不是 Claude Code 客户端 3) 不是 Haiku 
模型 4) system 中还没有 Claude Code 提示词 + if !isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) && + !strings.Contains(strings.ToLower(reqModel), "haiku") && + !systemIncludesClaudeCodePrompt(parsed.System) { + body = injectClaudeCodePrompt(body, parsed.System) + } + + normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true} + if s.identityService != nil { + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + if err == nil && fp != nil { + if metadataUserID := s.buildOAuthMetadataUserID(parsed, account, fp); metadataUserID != "" { + normalizeOpts.injectMetadata = true + normalizeOpts.metadataUserID = metadataUserID + } + } + } + + body, reqModel, toolNameMap = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts) } // 强制执行 cache_control 块数量限制(最多 4 个) body = enforceCacheControlLimit(body) // 应用模型映射(仅对apikey类型账号) - originalModel := reqModel if account.Type == AccountTypeAPIKey { mappedModel := account.GetMappedModel(reqModel) if mappedModel != reqModel { @@ -1948,10 +2257,9 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A retryStart := time.Now() for attempt := 1; attempt <= maxRetryAttempts; attempt++ { // 构建上游请求(每次重试需要重新构建,因为请求体需要重新读取) - upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel) + upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream) // Capture upstream request body for ops retry of this attempt. c.Set(OpsUpstreamRequestBodyKey, string(body)) - if err != nil { return nil, err } @@ -2029,7 +2337,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // also downgrade tool_use/tool_result blocks to text. filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) + retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -2061,7 +2369,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed { log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID) filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body) - retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel) + retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream) if buildErr2 == nil { retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency) if retryErr2 == nil { @@ -2278,7 +2586,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A var firstTokenMs *int var clientDisconnect bool if reqStream { - streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel) + streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap) if err != nil { if err.Error() == "have error in stream" { return nil, &UpstreamFailoverError{ @@ -2291,7 +2599,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A firstTokenMs = 
streamResult.firstTokenMs clientDisconnect = streamResult.clientDisconnect } else { - usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel) + usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap) if err != nil { return nil, err } @@ -2308,7 +2616,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A }, nil } -func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) { +func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool) (*http.Request, error) { // 确定目标URL targetURL := claudeAPIURL if account.Type == AccountTypeAPIKey { @@ -2377,6 +2685,9 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } + if tokenType == "oauth" { + applyClaudeOAuthHeaderDefaults(req, reqStream) + } // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" { @@ -2459,6 +2770,26 @@ func defaultAPIKeyBetaHeader(body []byte) string { return claude.APIKeyBetaHeader } +func applyClaudeOAuthHeaderDefaults(req *http.Request, isStream bool) { + if req == nil { + return + } + if req.Header.Get("accept") == "" { + req.Header.Set("accept", "application/json") + } + for key, value := range claude.DefaultHeaders { + if value == "" { + continue + } + if req.Header.Get(key) == "" { + req.Header.Set(key, value) + } + } + if isStream && req.Header.Get("x-stainless-helper-method") == "" { + req.Header.Set("x-stainless-helper-method", "stream") + } +} + func truncateForLog(b []byte, maxBytes int) string { if maxBytes <= 0 { maxBytes = 2048 @@ -2739,7 +3070,7 @@ type streamingResult struct { clientDisconnect bool // 客户端是否在流式传输过程中断开 } -func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string) (*streamingResult, error) { +func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string) (*streamingResult, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -2832,6 +3163,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel + rewriteTools := account.IsOAuth() clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage for { @@ -2873,11 +3205,14 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http // Extract data from SSE line (supports both "data: " and "data:" formats) var data string if sseDataRe.MatchString(line) { - data = sseDataRe.ReplaceAllString(line, "") // 如果有模型映射,替换响应中的model字段 if needModelReplace { line = s.replaceModelInSSELine(line, mappedModel, originalModel) } + if rewriteTools { + line = s.replaceToolNamesInSSELine(line, toolNameMap) + } + data = sseDataRe.ReplaceAllString(line, "") } // 写入客户端(统一处理 data 行和非 data 行) @@ -2960,6 +3295,61 @@ func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) return "data: " + string(newData) } +func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { + switch v := 
value.(type) { + case map[string]any: + changed := false + if blockType, _ := v["type"].(string); blockType == "tool_use" { + if name, ok := v["name"].(string); ok { + mapped := normalizeToolNameForOpenCode(name, toolNameMap) + if mapped != name { + v["name"] = mapped + changed = true + } + } + } + for _, item := range v { + if rewriteToolNamesInValue(item, toolNameMap) { + changed = true + } + } + return changed + case []any: + changed := false + for _, item := range v { + if rewriteToolNamesInValue(item, toolNameMap) { + changed = true + } + } + return changed + default: + return false + } +} + +func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { + if !sseDataRe.MatchString(line) { + return line + } + data := sseDataRe.ReplaceAllString(line, "") + if data == "" || data == "[DONE]" { + return line + } + + var event map[string]any + if err := json.Unmarshal([]byte(data), &event); err != nil { + return line + } + if !rewriteToolNamesInValue(event, toolNameMap) { + return line + } + newData, err := json.Marshal(event) + if err != nil { + return line + } + return "data: " + string(newData) +} + func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { // 解析message_start获取input tokens(标准Claude API格式) var msgStart struct { @@ -3001,7 +3391,7 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } } -func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*ClaudeUsage, error) { +func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, toolNameMap map[string]string) (*ClaudeUsage, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -3022,6 +3412,9 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h if originalModel != mappedModel { body = s.replaceModelInResponseBody(body, mappedModel, originalModel) } + if account.IsOAuth() { + body = s.replaceToolNamesInResponseBody(body, toolNameMap) + } responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) @@ -3059,6 +3452,24 @@ func (s *GatewayService) replaceModelInResponseBody(body []byte, fromModel, toMo return newBody } +func (s *GatewayService) replaceToolNamesInResponseBody(body []byte, toolNameMap map[string]string) []byte { + if len(body) == 0 { + return body + } + var resp map[string]any + if err := json.Unmarshal(body, &resp); err != nil { + return body + } + if !rewriteToolNamesInValue(resp, toolNameMap) { + return body + } + newBody, err := json.Marshal(resp) + if err != nil { + return body + } + return newBody +} + // RecordUsageInput 记录使用量的输入参数 type RecordUsageInput struct { Result *ForwardResult @@ -3224,6 +3635,11 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, body := parsed.Body reqModel := parsed.Model + if account.IsOAuth() { + normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true} + body, reqModel, _ = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts) + } + // Antigravity 账户不支持 count_tokens 转发,直接返回空值 if account.Platform == PlatformAntigravity { c.JSON(http.StatusOK, gin.H{"input_tokens": 0}) @@ -3412,6 +3828,9 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con if req.Header.Get("anthropic-version") == "" { 
req.Header.Set("anthropic-version", "2023-06-01") } + if tokenType == "oauth" { + applyClaudeOAuthHeaderDefaults(req, false) + } // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go index 1ffa8057..4ab1ab96 100644 --- a/backend/internal/service/identity_service.go +++ b/backend/internal/service/identity_service.go @@ -24,13 +24,13 @@ var ( // 默认指纹值(当客户端未提供时使用) var defaultFingerprint = Fingerprint{ - UserAgent: "claude-cli/2.0.62 (external, cli)", + UserAgent: "claude-cli/2.1.2 (external, cli)", StainlessLang: "js", - StainlessPackageVersion: "0.52.0", + StainlessPackageVersion: "0.70.0", StainlessOS: "Linux", StainlessArch: "x64", StainlessRuntime: "node", - StainlessRuntimeVersion: "v22.14.0", + StainlessRuntimeVersion: "v24.3.0", } // Fingerprint represents account fingerprint data @@ -230,7 +230,7 @@ func generateUUIDFromSeed(seed string) string { } // parseUserAgentVersion 解析user-agent版本号 -// 例如:claude-cli/2.0.62 -> (2, 0, 62) +// 例如:claude-cli/2.1.2 -> (2, 1, 2) func parseUserAgentVersion(ua string) (major, minor, patch int, ok bool) { // 匹配 xxx/x.y.z 格式 matches := userAgentVersionRegex.FindStringSubmatch(ua) From b8c48fb4775785e4bb607585d2f77fde03444fcc Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 15 Jan 2026 19:17:07 +0800 Subject: [PATCH 008/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E5=8C=BA?= =?UTF-8?q?=E5=88=86=20Claude=20Code=20OAuth=20=E9=80=82=E9=85=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/handler/gateway_handler.go | 3 + backend/internal/pkg/claude/constants.go | 4 + backend/internal/service/gateway_service.go | 110 +++++++++++++++----- 3 files changed, 90 insertions(+), 27 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index b60618a8..91d590bf 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -707,6 +707,9 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { return } + // 检查是否为 Claude Code 客户端,设置到 context 中 + SetClaudeCodeClientContext(c, body) + setOpsRequestContext(c, "", false, body) parsedReq, err := service.ParseGatewayRequest(body) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 15144881..f60412c2 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -9,11 +9,15 @@ const ( BetaClaudeCode = "claude-code-20250219" BetaInterleavedThinking = "interleaved-thinking-2025-05-14" BetaFineGrainedToolStreaming = "fine-grained-tool-streaming-2025-05-14" + BetaTokenCounting = "token-counting-2024-11-01" ) // DefaultBetaHeader Claude Code 客户端默认的 anthropic-beta header const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming +// CountTokensBetaHeader count_tokens 请求使用的 anthropic-beta header +const CountTokensBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaTokenCounting + // HaikuBetaHeader Haiku 模型使用的 anthropic-beta header(不需要 claude-code beta) const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 899a0fc5..93dc59dc 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -65,6 
+65,8 @@ var ( toolPrefixRe = regexp.MustCompile(`(?i)^(?:oc_|mcp_)`) toolNameBoundaryRe = regexp.MustCompile(`[^a-zA-Z0-9]+`) toolNameCamelRe = regexp.MustCompile(`([a-z0-9])([A-Z])`) + toolNameFieldRe = regexp.MustCompile(`"name"\s*:\s*"([^"]+)"`) + modelFieldRe = regexp.MustCompile(`"model"\s*:\s*"([^"]+)"`) claudeToolNameOverrides = map[string]string{ "bash": "Bash", @@ -1941,6 +1943,16 @@ func isClaudeCodeClient(userAgent string, metadataUserID string) bool { return claudeCliUserAgentRe.MatchString(userAgent) } +func isClaudeCodeRequest(ctx context.Context, c *gin.Context, parsed *ParsedRequest) bool { + if IsClaudeCodeClient(ctx) { + return true + } + if parsed == nil || c == nil { + return false + } + return isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) +} + // systemIncludesClaudeCodePrompt 检查 system 中是否已包含 Claude Code 提示词 // 使用前缀匹配支持多种变体(标准版、Agent SDK 版等) func systemIncludesClaudeCodePrompt(system any) bool { @@ -2203,11 +2215,13 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A originalModel := reqModel var toolNameMap map[string]string - if account.IsOAuth() { + isClaudeCode := isClaudeCodeRequest(ctx, c, parsed) + shouldMimicClaudeCode := account.IsOAuth() && !isClaudeCode + + if shouldMimicClaudeCode { // 智能注入 Claude Code 系统提示词(仅 OAuth/SetupToken 账号需要) // 条件:1) OAuth/SetupToken 账号 2) 不是 Claude Code 客户端 3) 不是 Haiku 模型 4) system 中还没有 Claude Code 提示词 - if !isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) && - !strings.Contains(strings.ToLower(reqModel), "haiku") && + if !strings.Contains(strings.ToLower(reqModel), "haiku") && !systemIncludesClaudeCodePrompt(parsed.System) { body = injectClaudeCodePrompt(body, parsed.System) } @@ -2257,7 +2271,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A retryStart := time.Now() for attempt := 1; attempt <= maxRetryAttempts; attempt++ { // 构建上游请求(每次重试需要重新构建,因为请求体需要重新读取) - upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream) + upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) // Capture upstream request body for ops retry of this attempt. c.Set(OpsUpstreamRequestBodyKey, string(body)) if err != nil { @@ -2337,7 +2351,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // also downgrade tool_use/tool_result blocks to text. 
filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream) + retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -2369,7 +2383,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed { log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID) filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body) - retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream) + retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel, reqStream, shouldMimicClaudeCode) if buildErr2 == nil { retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency) if retryErr2 == nil { @@ -2586,7 +2600,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A var firstTokenMs *int var clientDisconnect bool if reqStream { - streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap) + streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel, toolNameMap, shouldMimicClaudeCode) if err != nil { if err.Error() == "have error in stream" { return nil, &UpstreamFailoverError{ @@ -2599,7 +2613,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A firstTokenMs = streamResult.firstTokenMs clientDisconnect = streamResult.clientDisconnect } else { - usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap) + usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel, toolNameMap, shouldMimicClaudeCode) if err != nil { return nil, err } @@ -2616,7 +2630,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A }, nil } -func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool) (*http.Request, error) { +func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, reqStream bool, mimicClaudeCode bool) (*http.Request, error) { // 确定目标URL targetURL := claudeAPIURL if account.Type == AccountTypeAPIKey { @@ -2632,7 +2646,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // OAuth账号:应用统一指纹 var fingerprint *Fingerprint - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { // 1. 
获取或创建指纹(包含随机生成的ClientID) fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err != nil { @@ -2685,12 +2699,12 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { applyClaudeOAuthHeaderDefaults(req, reqStream) } // 处理anthropic-beta header(OAuth账号需要特殊处理) - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) @@ -3070,7 +3084,7 @@ type streamingResult struct { clientDisconnect bool // 客户端是否在流式传输过程中断开 } -func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string) (*streamingResult, error) { +func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string, toolNameMap map[string]string, mimicClaudeCode bool) (*streamingResult, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -3163,7 +3177,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel - rewriteTools := account.IsOAuth() + rewriteTools := mimicClaudeCode clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage for { @@ -3327,6 +3341,37 @@ func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { } } +func replaceToolNamesInText(text string, toolNameMap map[string]string) string { + if text == "" { + return text + } + output := toolNameFieldRe.ReplaceAllStringFunc(text, func(match string) string { + submatches := toolNameFieldRe.FindStringSubmatch(match) + if len(submatches) < 2 { + return match + } + name := submatches[1] + mapped := normalizeToolNameForOpenCode(name, toolNameMap) + if mapped == name { + return match + } + return strings.Replace(match, name, mapped, 1) + }) + output = modelFieldRe.ReplaceAllStringFunc(output, func(match string) string { + submatches := modelFieldRe.FindStringSubmatch(match) + if len(submatches) < 2 { + return match + } + model := submatches[1] + mapped := claude.DenormalizeModelID(model) + if mapped == model { + return match + } + return strings.Replace(match, model, mapped, 1) + }) + return output +} + func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { if !sseDataRe.MatchString(line) { return line @@ -3338,7 +3383,11 @@ func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[ var event map[string]any if err := json.Unmarshal([]byte(data), &event); err != nil { - return line + replaced := replaceToolNamesInText(data, toolNameMap) + if replaced == data { + return line + } + return "data: " + replaced } if !rewriteToolNamesInValue(event, toolNameMap) { return line @@ -3391,7 +3440,7 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } } -func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, 
toolNameMap map[string]string) (*ClaudeUsage, error) { +func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string, toolNameMap map[string]string, mimicClaudeCode bool) (*ClaudeUsage, error) { // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) @@ -3412,7 +3461,7 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h if originalModel != mappedModel { body = s.replaceModelInResponseBody(body, mappedModel, originalModel) } - if account.IsOAuth() { + if mimicClaudeCode { body = s.replaceToolNamesInResponseBody(body, toolNameMap) } @@ -3458,7 +3507,11 @@ func (s *GatewayService) replaceToolNamesInResponseBody(body []byte, toolNameMap } var resp map[string]any if err := json.Unmarshal(body, &resp); err != nil { - return body + replaced := replaceToolNamesInText(string(body), toolNameMap) + if replaced == string(body) { + return body + } + return []byte(replaced) } if !rewriteToolNamesInValue(resp, toolNameMap) { return body @@ -3635,7 +3688,10 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, body := parsed.Body reqModel := parsed.Model - if account.IsOAuth() { + isClaudeCode := isClaudeCodeRequest(ctx, c, parsed) + shouldMimicClaudeCode := account.IsOAuth() && !isClaudeCode + + if shouldMimicClaudeCode { normalizeOpts := claudeOAuthNormalizeOptions{stripSystemCacheControl: true} body, reqModel, _ = normalizeClaudeOAuthRequestBody(body, reqModel, normalizeOpts) } @@ -3666,7 +3722,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // 构建上游请求 - upstreamReq, err := s.buildCountTokensRequest(ctx, c, account, body, token, tokenType, reqModel) + upstreamReq, err := s.buildCountTokensRequest(ctx, c, account, body, token, tokenType, reqModel, shouldMimicClaudeCode) if err != nil { s.countTokensError(c, http.StatusInternalServerError, "api_error", "Failed to build request") return err @@ -3699,7 +3755,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, log.Printf("Account %d: detected thinking block signature error on count_tokens, retrying with filtered thinking blocks", account.ID) filteredBody := FilterThinkingBlocksForRetry(body) - retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) + retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel, shouldMimicClaudeCode) if buildErr == nil { retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr == nil { @@ -3764,7 +3820,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // buildCountTokensRequest 构建 count_tokens 上游请求 -func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) { +func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string, mimicClaudeCode bool) (*http.Request, error) { // 确定目标 URL targetURL := claudeAPICountTokensURL if account.Type == AccountTypeAPIKey { @@ -3779,7 +3835,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用统一指纹和重写 userID - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && 
s.identityService != nil { fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err == nil { accountUUID := account.GetExtraString("account_uuid") @@ -3814,7 +3870,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用指纹到请求头 - if account.IsOAuth() && s.identityService != nil { + if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if fp != nil { s.identityService.ApplyFingerprint(req, fp) @@ -3828,13 +3884,13 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" { + if tokenType == "oauth" && mimicClaudeCode { applyClaudeOAuthHeaderDefaults(req, false) } // OAuth 账号:处理 anthropic-beta header - if tokenType == "oauth" { - req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + if tokenType == "oauth" && mimicClaudeCode { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:与 messages 同步的按需 beta 注入(默认关闭) if requestNeedsBetaFeatures(body) { From 0962ba43c0fcc517225d716b056cc3dd3d71125f Mon Sep 17 00:00:00 2001 From: cyhhao Date: Fri, 16 Jan 2026 00:41:29 +0800 Subject: [PATCH 009/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E8=A1=A5?= =?UTF-8?q?=E9=BD=90=E9=9D=9E=20Claude=20Code=20OAuth=20=E5=85=BC=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/pkg/claude/constants.go | 6 + backend/internal/service/account.go | 16 ++ backend/internal/service/gateway_service.go | 239 +++++++++++++++++--- 3 files changed, 232 insertions(+), 29 deletions(-) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index f60412c2..0c6e9b4c 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -15,6 +15,12 @@ const ( // DefaultBetaHeader Claude Code 客户端默认的 anthropic-beta header const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming +// MessageBetaHeaderNoTools /v1/messages 在无工具时的 beta header +const MessageBetaHeaderNoTools = BetaOAuth + "," + BetaInterleavedThinking + +// MessageBetaHeaderWithTools /v1/messages 在有工具时的 beta header +const MessageBetaHeaderWithTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + // CountTokensBetaHeader count_tokens 请求使用的 anthropic-beta header const CountTokensBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaTokenCounting diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go index 0d7a9cf9..9f965682 100644 --- a/backend/internal/service/account.go +++ b/backend/internal/service/account.go @@ -381,6 +381,22 @@ func (a *Account) GetExtraString(key string) string { return "" } +func (a *Account) GetClaudeUserID() string { + if v := strings.TrimSpace(a.GetExtraString("claude_user_id")); v != "" { + return v + } + if v := strings.TrimSpace(a.GetExtraString("anthropic_user_id")); v != "" { + return v + } + if v := strings.TrimSpace(a.GetCredential("claude_user_id")); v != "" { + return v + } + if v := strings.TrimSpace(a.GetCredential("anthropic_user_id")); v != "" { + 
return v + } + return "" +} + func (a *Account) IsCustomErrorCodesEnabled() bool { if a.Type != AccountTypeAPIKey || a.Credentials == nil { return false diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 93dc59dc..71ad0d00 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -67,6 +67,9 @@ var ( toolNameCamelRe = regexp.MustCompile(`([a-z0-9])([A-Z])`) toolNameFieldRe = regexp.MustCompile(`"name"\s*:\s*"([^"]+)"`) modelFieldRe = regexp.MustCompile(`"model"\s*:\s*"([^"]+)"`) + toolDescAbsPathRe = regexp.MustCompile(`/\/?(?:home|Users|tmp|var|opt|usr|etc)\/[^\s,\)"'\]]+`) + toolDescWinPathRe = regexp.MustCompile(`(?i)[A-Z]:\\[^\s,\)"'\]]+`) + opencodeTextRe = regexp.MustCompile(`(?i)opencode`) claudeToolNameOverrides = map[string]string{ "bash": "Bash", @@ -470,6 +473,22 @@ func normalizeToolNameForClaude(name string, cache map[string]string) string { } func normalizeToolNameForOpenCode(name string, cache map[string]string) string { + if name == "" { + return name + } + stripped := stripToolPrefix(name) + if cache != nil { + if mapped, ok := cache[stripped]; ok { + return mapped + } + } + if mapped, ok := openCodeToolOverrides[stripped]; ok { + return mapped + } + return toSnakeCase(stripped) +} + +func normalizeParamNameForOpenCode(name string, cache map[string]string) string { if name == "" { return name } @@ -478,10 +497,63 @@ func normalizeToolNameForOpenCode(name string, cache map[string]string) string { return mapped } } - if mapped, ok := openCodeToolOverrides[name]; ok { - return mapped + return name +} + +func sanitizeOpenCodeText(text string) string { + if text == "" { + return text + } + text = strings.ReplaceAll(text, "OpenCode", "Claude Code") + text = opencodeTextRe.ReplaceAllString(text, "Claude") + return text +} + +func sanitizeToolDescription(description string) string { + if description == "" { + return description + } + description = toolDescAbsPathRe.ReplaceAllString(description, "[path]") + description = toolDescWinPathRe.ReplaceAllString(description, "[path]") + return sanitizeOpenCodeText(description) +} + +func normalizeToolInputSchema(inputSchema any, cache map[string]string) { + schema, ok := inputSchema.(map[string]any) + if !ok { + return + } + properties, ok := schema["properties"].(map[string]any) + if !ok { + return + } + + newProperties := make(map[string]any, len(properties)) + for key, value := range properties { + snakeKey := toSnakeCase(key) + newProperties[snakeKey] = value + if snakeKey != key && cache != nil { + cache[snakeKey] = key + } + } + schema["properties"] = newProperties + + if required, ok := schema["required"].([]any); ok { + newRequired := make([]any, 0, len(required)) + for _, item := range required { + name, ok := item.(string) + if !ok { + newRequired = append(newRequired, item) + continue + } + snakeName := toSnakeCase(name) + newRequired = append(newRequired, snakeName) + if snakeName != name && cache != nil { + cache[snakeName] = name + } + } + schema["required"] = newRequired } - return toSnakeCase(name) } func stripCacheControlFromSystemBlocks(system any) bool { @@ -498,9 +570,6 @@ func stripCacheControlFromSystemBlocks(system any) bool { if _, exists := block["cache_control"]; !exists { continue } - if text, ok := block["text"].(string); ok && text == claudeCodeSystemPrompt { - continue - } delete(block, "cache_control") changed = true } @@ -518,6 +587,34 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID 
string, opts claudeOAu toolNameMap := make(map[string]string) + if system, ok := req["system"]; ok { + switch v := system.(type) { + case string: + sanitized := sanitizeOpenCodeText(v) + if sanitized != v { + req["system"] = sanitized + } + case []any: + for _, item := range v { + block, ok := item.(map[string]any) + if !ok { + continue + } + if blockType, _ := block["type"].(string); blockType != "text" { + continue + } + text, ok := block["text"].(string) + if !ok || text == "" { + continue + } + sanitized := sanitizeOpenCodeText(text) + if sanitized != text { + block["text"] = sanitized + } + } + } + } + if rawModel, ok := req["model"].(string); ok { normalized := claude.NormalizeModelID(rawModel) if normalized != rawModel { @@ -540,6 +637,15 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu toolMap["name"] = normalized } } + if desc, ok := toolMap["description"].(string); ok { + sanitized := sanitizeToolDescription(desc) + if sanitized != desc { + toolMap["description"] = sanitized + } + } + if schema, ok := toolMap["input_schema"]; ok { + normalizeToolInputSchema(schema, toolNameMap) + } tools[idx] = toolMap } req["tools"] = tools @@ -551,13 +657,15 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu normalized = name } if toolMap, ok := value.(map[string]any); ok { - if toolName, ok := toolMap["name"].(string); ok { - mappedName := normalizeToolNameForClaude(toolName, toolNameMap) - if mappedName != "" && mappedName != toolName { - toolMap["name"] = mappedName + toolMap["name"] = normalized + if desc, ok := toolMap["description"].(string); ok { + sanitized := sanitizeToolDescription(desc) + if sanitized != desc { + toolMap["description"] = sanitized } - } else if normalized != name { - toolMap["name"] = normalized + } + if schema, ok := toolMap["input_schema"]; ok { + normalizeToolInputSchema(schema, toolNameMap) } normalizedTools[normalized] = toolMap continue @@ -630,7 +738,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu } func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account *Account, fp *Fingerprint) string { - if parsed == nil || fp == nil || fp.ClientID == "" { + if parsed == nil || account == nil { return "" } if parsed.MetadataUserID != "" { @@ -640,13 +748,22 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account if accountUUID == "" { return "" } + + userID := strings.TrimSpace(account.GetClaudeUserID()) + if userID == "" && fp != nil { + userID = fp.ClientID + } + if userID == "" { + return "" + } + sessionHash := s.GenerateSessionHash(parsed) sessionID := uuid.NewString() if sessionHash != "" { seed := fmt.Sprintf("%d::%s", account.ID, sessionHash) sessionID = generateSessionUUID(seed) } - return fmt.Sprintf("user_%s_account_%s_session_%s", fp.ClientID, accountUUID, sessionID) + return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) } func generateSessionUUID(seed string) string { @@ -2705,7 +2822,11 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" && mimicClaudeCode { - req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + if requestHasTools(body) { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + } else { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + } } else if s.cfg != nil && 
s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) if requestNeedsBetaFeatures(body) { @@ -2776,6 +2897,20 @@ func requestNeedsBetaFeatures(body []byte) bool { return false } +func requestHasTools(body []byte) bool { + tools := gjson.GetBytes(body, "tools") + if !tools.Exists() { + return false + } + if tools.IsArray() { + return len(tools.Array()) > 0 + } + if tools.IsObject() { + return len(tools.Map()) > 0 + } + return false +} + func defaultAPIKeyBetaHeader(body []byte) string { modelID := gjson.GetBytes(body, "model").String() if strings.Contains(strings.ToLower(modelID), "haiku") { @@ -3309,6 +3444,45 @@ func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) return "data: " + string(newData) } +func rewriteParamKeysInValue(value any, cache map[string]string) (any, bool) { + switch v := value.(type) { + case map[string]any: + changed := false + rewritten := make(map[string]any, len(v)) + for key, item := range v { + newKey := normalizeParamNameForOpenCode(key, cache) + newItem, childChanged := rewriteParamKeysInValue(item, cache) + if childChanged { + changed = true + } + if newKey != key { + changed = true + } + rewritten[newKey] = newItem + } + if !changed { + return value, false + } + return rewritten, true + case []any: + changed := false + rewritten := make([]any, len(v)) + for idx, item := range v { + newItem, childChanged := rewriteParamKeysInValue(item, cache) + if childChanged { + changed = true + } + rewritten[idx] = newItem + } + if !changed { + return value, false + } + return rewritten, true + default: + return value, false + } +} + func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { switch v := value.(type) { case map[string]any: @@ -3321,6 +3495,15 @@ func rewriteToolNamesInValue(value any, toolNameMap map[string]string) bool { changed = true } } + if input, ok := v["input"].(map[string]any); ok { + rewrittenInput, inputChanged := rewriteParamKeysInValue(input, toolNameMap) + if inputChanged { + if m, ok := rewrittenInput.(map[string]any); ok { + v["input"] = m + changed = true + } + } + } } for _, item := range v { if rewriteToolNamesInValue(item, toolNameMap) { @@ -3369,6 +3552,15 @@ func replaceToolNamesInText(text string, toolNameMap map[string]string) string { } return strings.Replace(match, model, mapped, 1) }) + + for mapped, original := range toolNameMap { + if mapped == "" || original == "" || mapped == original { + continue + } + output = strings.ReplaceAll(output, "\""+mapped+"\":", "\""+original+"\":") + output = strings.ReplaceAll(output, "\\\""+mapped+"\\\":", "\\\""+original+"\\\":") + } + return output } @@ -3381,22 +3573,11 @@ func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[ return line } - var event map[string]any - if err := json.Unmarshal([]byte(data), &event); err != nil { - replaced := replaceToolNamesInText(data, toolNameMap) - if replaced == data { - return line - } - return "data: " + replaced - } - if !rewriteToolNamesInValue(event, toolNameMap) { + replaced := replaceToolNamesInText(data, toolNameMap) + if replaced == data { return line } - newData, err := json.Marshal(event) - if err != nil { - return line - } - return "data: " + string(newData) + return "data: " + replaced } func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { From 0c011b889b980ba4626703af4d54e1879cfd3f9c Mon Sep 17 00:00:00 2001 From: cyhhao Date: Fri, 16 Jan 2026 23:15:52 +0800 Subject: [PATCH 
010/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20Claude=20Cod?= =?UTF-8?q?e=20OAuth=20=E8=A1=A5=E9=BD=90=20oauth=20beta?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 34 ++++++++++++++++----- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 71ad0d00..8b4871c9 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2820,12 +2820,19 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex applyClaudeOAuthHeaderDefaults(req, reqStream) } - // 处理anthropic-beta header(OAuth账号需要特殊处理) - if tokenType == "oauth" && mimicClaudeCode { - if requestHasTools(body) { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + // 处理 anthropic-beta header(OAuth 账号需要包含 oauth beta) + if tokenType == "oauth" { + if mimicClaudeCode { + // 非 Claude Code 客户端:按 Claude Code 规则生成 beta header + if requestHasTools(body) { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) + } else { + req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + } } else { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta + clientBetaHeader := req.Header.Get("anthropic-beta") + req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, clientBetaHeader)) } } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) @@ -4070,8 +4077,21 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:处理 anthropic-beta header - if tokenType == "oauth" && mimicClaudeCode { - req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + if tokenType == "oauth" { + if mimicClaudeCode { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + } else { + clientBetaHeader := req.Header.Get("anthropic-beta") + if clientBetaHeader == "" { + req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + } else { + beta := s.getBetaHeader(modelID, clientBetaHeader) + if !strings.Contains(beta, claude.BetaTokenCounting) { + beta = beta + "," + claude.BetaTokenCounting + } + req.Header.Set("anthropic-beta", beta) + } + } } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { // API-key:与 messages 同步的按需 beta 注入(默认关闭) if requestNeedsBetaFeatures(body) { From 8917a3ea8fa4ffa8943e32513b3cee5528ef516d Mon Sep 17 00:00:00 2001 From: cyhhao Date: Sat, 17 Jan 2026 00:27:36 +0800 Subject: [PATCH 011/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=20golangci-lint?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 8b4871c9..fb2d40a3 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -439,7 +439,7 @@ func toPascalCase(value string) string { } runes := []rune(lower) runes[0] = unicode.ToUpper(runes[0]) - builder.WriteString(string(runes)) + _, _ = builder.WriteString(string(runes)) } return builder.String() } @@ -723,12 +723,8 @@ func 
normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu } } - if _, ok := req["temperature"]; ok { - delete(req, "temperature") - } - if _, ok := req["tool_choice"]; ok { - delete(req, "tool_choice") - } + delete(req, "temperature") + delete(req, "tool_choice") newBody, err := json.Marshal(req) if err != nil { From 4e75d8fda9f010e856741328d9a49ee66a1b3a53 Mon Sep 17 00:00:00 2001 From: nick8802754751 <> Date: Sat, 17 Jan 2026 16:06:44 +0800 Subject: [PATCH 012/214] =?UTF-8?q?fix:=20=E6=B7=BB=E5=8A=A0=E6=B7=B7?= =?UTF-8?q?=E5=90=88=E6=B8=A0=E9=81=93=E8=AD=A6=E5=91=8A=E7=A1=AE=E8=AE=A4?= =?UTF-8?q?=E6=A1=86=E5=92=8C=E8=BF=87=E6=BB=A4=20prompt=5Fcache=5Fretenti?= =?UTF-8?q?on=20=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 前端: EditAccountModal 和 CreateAccountModal 添加 409 mixed_channel_warning 处理 - 前端: 弹出确认框让用户确认混合渠道风险 - 后端: 过滤 OpenAI 请求中的 prompt_cache_retention 参数(上游不支持) - 添加中英文翻译 Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com> --- .../service/openai_gateway_service.go | 6 ++ .../components/account/CreateAccountModal.vue | 60 ++++++++++++++----- .../components/account/EditAccountModal.vue | 32 +++++++++- frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + 5 files changed, 82 insertions(+), 18 deletions(-) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index c7d94882..45b4c69c 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -649,6 +649,12 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco bodyModified = true } } + + // Remove prompt_cache_retention (not supported by upstream OpenAI API) + if _, has := reqBody["prompt_cache_retention"]; has { + delete(reqBody, "prompt_cache_retention") + bodyModified = true + } } // Re-serialize body only if modified diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index c81de00e..05f328ac 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -2157,6 +2157,46 @@ const handleClose = () => { emit('close') } +// Helper function to create account with mixed channel warning handling +const doCreateAccount = async (payload: any, confirmMixedChannelRisk = false) => { + if (confirmMixedChannelRisk) { + payload.confirm_mixed_channel_risk = true + } + + submitting.value = true + try { + await adminAPI.accounts.create(payload) + appStore.showSuccess(t('admin.accounts.accountCreated')) + emit('created') + handleClose() + } catch (error: any) { + // Handle 409 mixed_channel_warning - show confirmation dialog + if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') { + const details = error.response.data.details || {} + const groupName = details.group_name || 'Unknown' + const currentPlatform = details.current_platform || 'Unknown' + const otherPlatform = details.other_platform || 'Unknown' + + const confirmMessage = t('admin.accounts.mixedChannelWarning', { + groupName, + currentPlatform, + otherPlatform + }) + + if (confirm(confirmMessage)) { + // Retry with confirmation flag + submitting.value = false + await doCreateAccount(payload, true) + return + } + } else { + appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) + } 
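+    // Note: on user confirmation the branch above re-enters doCreateAccount
+    // with confirmMixedChannelRisk = true and returns early; the awaited retry
+    // finishes before this frame's `finally` resets `submitting` again.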
+ } finally { + submitting.value = false + } +} + const handleSubmit = async () => { // For OAuth-based type, handle OAuth flow (goes to step 2) if (isOAuthFlow.value) { @@ -2213,21 +2253,11 @@ const handleSubmit = async () => { form.credentials = credentials - submitting.value = true - try { - await adminAPI.accounts.create({ - ...form, - group_ids: form.group_ids, - auto_pause_on_expired: autoPauseOnExpired.value - }) - appStore.showSuccess(t('admin.accounts.accountCreated')) - emit('created') - handleClose() - } catch (error: any) { - appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) - } finally { - submitting.value = false - } + await doCreateAccount({ + ...form, + group_ids: form.group_ids, + auto_pause_on_expired: autoPauseOnExpired.value + }) } const goBackToBasicInfo = () => { diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index d27364f1..63b54df0 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -8,7 +8,7 @@
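Editor's aside: both modals branch on the same warning contract from the backend. Below is a minimal Go sketch of that payload, assuming only what the handlers above actually read (HTTP 409, `error: "mixed_channel_warning"`, and the three `details` fields); the helper name and package are hypothetical, not the repository's actual code:

package admin

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// writeMixedChannelWarning sketches the 409 payload shape that the
// CreateAccountModal/EditAccountModal handlers parse before prompting
// the user to confirm the mixed-channel risk.
func writeMixedChannelWarning(c *gin.Context, groupName, currentPlatform, otherPlatform string) {
	c.JSON(http.StatusConflict, gin.H{
		"error": "mixed_channel_warning",
		"details": gin.H{
			"group_name":       groupName,
			"current_platform": currentPlatform,
			"other_platform":   otherPlatform,
		},
	})
}

A confirmed retry simply re-sends the original payload with confirm_mixed_channel_risk set to true, which the backend treats as an override of the warning.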
@@ -1294,12 +1294,17 @@ const handleClose = () => {
   emit('close')
 }
 
-const handleSubmit = async () => {
+const handleSubmit = async (confirmMixedChannelRisk = false) => {
   if (!props.account) return
 
   submitting.value = true
   try {
     const updatePayload: Record<string, any> = { ...form }
+
+    // Add confirmation flag if user confirmed mixed channel risk
+    if (confirmMixedChannelRisk) {
+      updatePayload.confirm_mixed_channel_risk = true
+    }
     // 后端期望 proxy_id: 0 表示清除代理,而不是 null
     if (updatePayload.proxy_id === null) {
       updatePayload.proxy_id = 0
     }
@@ -1415,7 +1420,28 @@ const handleSubmit = async () => {
     emit('updated')
     handleClose()
   } catch (error: any) {
-    appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate'))
+    // Handle 409 mixed_channel_warning - show confirmation dialog
+    if (error.response?.status === 409 && error.response?.data?.error === 'mixed_channel_warning') {
+      const details = error.response.data.details || {}
+      const groupName = details.group_name || 'Unknown'
+      const currentPlatform = details.current_platform || 'Unknown'
+      const otherPlatform = details.other_platform || 'Unknown'
+
+      const confirmMessage = t('admin.accounts.mixedChannelWarning', {
+        groupName,
+        currentPlatform,
+        otherPlatform
+      })
+
+      if (confirm(confirmMessage)) {
+        // Retry with confirmation flag
+        submitting.value = false
+        await handleSubmit(true)
+        return
+      }
+    } else {
+      appStore.showError(error.response?.data?.message || error.response?.data?.detail || t('admin.accounts.failedToUpdate'))
+    }
   } finally {
     submitting.value = false
   }
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts
index b25b5a0b..b36d31e4 100644
--- a/frontend/src/i18n/locales/en.ts
+++ b/frontend/src/i18n/locales/en.ts
@@ -1306,6 +1306,7 @@ export default {
     accountUpdated: 'Account updated successfully',
     failedToCreate: 'Failed to create account',
     failedToUpdate: 'Failed to update account',
+    mixedChannelWarning: 'Warning: Group "{groupName}" contains both {currentPlatform} and {otherPlatform} accounts. Mixing different channels may cause thinking block signature validation issues, which will fallback to non-thinking mode. 
Are you sure you want to continue?', pleaseEnterAccountName: 'Please enter account name', pleaseEnterApiKey: 'Please enter API Key', apiKeyIsRequired: 'API Key is required', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index b7be8557..ad8380a8 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1439,6 +1439,7 @@ export default { accountUpdated: '账号更新成功', failedToCreate: '创建账号失败', failedToUpdate: '更新账号失败', + mixedChannelWarning: '警告:分组 "{groupName}" 中同时包含 {currentPlatform} 和 {otherPlatform} 账号。混合使用不同渠道可能导致 thinking block 签名验证问题,会自动回退到非 thinking 模式。确定要继续吗?', pleaseEnterAccountName: '请输入账号名称', pleaseEnterApiKey: '请输入 API Key', apiKeyIsRequired: 'API Key 是必需的', From 6549a40cf40922f8b9a6cfd30ecb4ae9263c8ae7 Mon Sep 17 00:00:00 2001 From: nick8802754751 <> Date: Sat, 17 Jan 2026 16:16:47 +0800 Subject: [PATCH 013/214] =?UTF-8?q?refactor:=20=E4=BD=BF=E7=94=A8=20Confir?= =?UTF-8?q?mDialog=20=E7=BB=84=E4=BB=B6=E6=9B=BF=E4=BB=A3=E5=8E=9F?= =?UTF-8?q?=E7=94=9F=20confirm()=20=E5=AF=B9=E8=AF=9D=E6=A1=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - EditAccountModal 和 CreateAccountModal 使用 ConfirmDialog 组件 - 保存 pending payload 供确认后重试 - 添加 mixedChannelWarningTitle 翻译 Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com> --- .../components/account/CreateAccountModal.vue | 71 ++++++++++++----- .../components/account/EditAccountModal.vue | 76 +++++++++++++------ frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + 4 files changed, 106 insertions(+), 43 deletions(-) diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 05f328ac..5702ccd6 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1615,6 +1615,18 @@
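[Hunk body largely lost to markup stripping; per the commit message it adds the ConfirmDialog component markup that replaces the native confirm() dialog in CreateAccountModal.vue, with the pending payload saved for retry after confirmation.]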
+ + + diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index b36d31e4..1f5471e6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1306,6 +1306,7 @@ export default { accountUpdated: 'Account updated successfully', failedToCreate: 'Failed to create account', failedToUpdate: 'Failed to update account', + mixedChannelWarningTitle: 'Mixed Channel Warning', mixedChannelWarning: 'Warning: Group "{groupName}" contains both {currentPlatform} and {otherPlatform} accounts. Mixing different channels may cause thinking block signature validation issues, which will fallback to non-thinking mode. Are you sure you want to continue?', pleaseEnterAccountName: 'Please enter account name', pleaseEnterApiKey: 'Please enter API Key', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index ad8380a8..b931b555 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1439,6 +1439,7 @@ export default { accountUpdated: '账号更新成功', failedToCreate: '创建账号失败', failedToUpdate: '更新账号失败', + mixedChannelWarningTitle: '混合渠道警告', mixedChannelWarning: '警告:分组 "{groupName}" 中同时包含 {currentPlatform} 和 {otherPlatform} 账号。混合使用不同渠道可能导致 thinking block 签名验证问题,会自动回退到非 thinking 模式。确定要继续吗?', pleaseEnterAccountName: '请输入账号名称', pleaseEnterApiKey: '请输入 API Key', From 32c47b1509287dbec5d4289bf0d23c5ea3a85f49 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Sat, 17 Jan 2026 18:16:34 +0800 Subject: [PATCH 014/214] fix(gateway): satisfy golangci-lint checks --- backend/internal/service/gateway_service.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index aa811bf5..ff143eee 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -420,7 +420,7 @@ func toPascalCase(value string) string { } runes := []rune(lower) runes[0] = unicode.ToUpper(runes[0]) - builder.WriteString(string(runes)) + _, _ = builder.WriteString(string(runes)) } return builder.String() } @@ -704,12 +704,8 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu } } - if _, ok := req["temperature"]; ok { - delete(req, "temperature") - } - if _, ok := req["tool_choice"]; ok { - delete(req, "tool_choice") - } + delete(req, "temperature") + delete(req, "tool_choice") newBody, err := json.Marshal(req) if err != nil { From eca3898410224e1295722282e802b425059df1cb Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 03:46:09 +0800 Subject: [PATCH 015/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20SSE=20?= =?UTF-8?q?=E7=BC=93=E5=86=B2=20input=5Fjson=5Fdelta=20=E5=8F=8D=E5=90=91?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 222 ++++++++++++++++---- 1 file changed, 185 insertions(+), 37 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 1f67f07d..02289a7a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3460,9 +3460,159 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel - rewriteTools := mimicClaudeCode clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage + pendingEventLines := make([]string, 0, 4) + toolInputBuffers := 
make(map[int]string) + + transformToolInputJSON := func(raw string) string { + raw = strings.TrimSpace(raw) + if raw == "" { + return raw + } + + var parsed any + if err := json.Unmarshal([]byte(raw), &parsed); err != nil { + return replaceToolNamesInText(raw, toolNameMap) + } + + rewritten, changed := rewriteParamKeysInValue(parsed, toolNameMap) + if changed { + if bytes, err := json.Marshal(rewritten); err == nil { + return string(bytes) + } + } + return raw + } + + processSSEEvent := func(lines []string) ([]string, string, error) { + if len(lines) == 0 { + return nil, "", nil + } + + eventName := "" + dataLine := "" + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "event:") { + eventName = strings.TrimSpace(strings.TrimPrefix(trimmed, "event:")) + continue + } + if dataLine == "" && sseDataRe.MatchString(trimmed) { + dataLine = sseDataRe.ReplaceAllString(trimmed, "") + } + } + + if eventName == "error" { + return nil, dataLine, errors.New("have error in stream") + } + + if dataLine == "" { + return []string{strings.Join(lines, "\n") + "\n\n"}, "", nil + } + + if dataLine == "[DONE]" { + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + dataLine + "\n\n" + return []string{block}, dataLine, nil + } + + var event map[string]any + if err := json.Unmarshal([]byte(dataLine), &event); err != nil { + replaced := replaceToolNamesInText(dataLine, toolNameMap) + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + replaced + "\n\n" + return []string{block}, replaced, nil + } + + eventType, _ := event["type"].(string) + if eventName == "" { + eventName = eventType + } + + if needModelReplace && eventType == "message_start" { + if msg, ok := event["message"].(map[string]any); ok { + if model, ok := msg["model"].(string); ok && model == mappedModel { + msg["model"] = originalModel + } + } + } + + if eventType == "content_block_delta" { + if delta, ok := event["delta"].(map[string]any); ok { + if deltaType, _ := delta["type"].(string); deltaType == "input_json_delta" { + if indexVal, ok := event["index"].(float64); ok { + index := int(indexVal) + if partial, ok := delta["partial_json"].(string); ok { + toolInputBuffers[index] += partial + } + } + return nil, dataLine, nil + } + } + } + + if eventType == "content_block_stop" { + if indexVal, ok := event["index"].(float64); ok { + index := int(indexVal) + if buffered := toolInputBuffers[index]; buffered != "" { + delete(toolInputBuffers, index) + + transformed := transformToolInputJSON(buffered) + synthetic := map[string]any{ + "type": "content_block_delta", + "index": index, + "delta": map[string]any{ + "type": "input_json_delta", + "partial_json": transformed, + }, + } + + synthBytes, synthErr := json.Marshal(synthetic) + if synthErr == nil { + synthBlock := "event: content_block_delta\n" + "data: " + string(synthBytes) + "\n\n" + + rewriteToolNamesInValue(event, toolNameMap) + stopBytes, stopErr := json.Marshal(event) + if stopErr == nil { + stopBlock := "" + if eventName != "" { + stopBlock = "event: " + eventName + "\n" + } + stopBlock += "data: " + string(stopBytes) + "\n\n" + return []string{synthBlock, stopBlock}, string(stopBytes), nil + } + } + } + } + } + + rewriteToolNamesInValue(event, toolNameMap) + newData, err := json.Marshal(event) + if err != nil { + replaced := replaceToolNamesInText(dataLine, toolNameMap) + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + 
block += "data: " + replaced + "\n\n" + return []string{block}, replaced, nil + } + + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + string(newData) + "\n\n" + return []string{block}, string(newData), nil + } + for { select { case ev, ok := <-events: @@ -3491,45 +3641,43 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", ev.err) } line := ev.line - if line == "event: error" { - // 上游返回错误事件,如果客户端已断开仍返回已收集的 usage - if clientDisconnected { - return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + trimmed := strings.TrimSpace(line) + + if trimmed == "" { + if len(pendingEventLines) == 0 { + continue } - return nil, errors.New("have error in stream") + + outputBlocks, data, err := processSSEEvent(pendingEventLines) + pendingEventLines = pendingEventLines[:0] + if err != nil { + if clientDisconnected { + return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + } + return nil, err + } + + for _, block := range outputBlocks { + if !clientDisconnected { + if _, werr := fmt.Fprint(w, block); werr != nil { + clientDisconnected = true + log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") + break + } + flusher.Flush() + } + if data != "" { + if firstTokenMs == nil && data != "[DONE]" { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + s.parseSSEUsage(data, usage) + } + } + continue } - // Extract data from SSE line (supports both "data: " and "data:" formats) - var data string - if sseDataRe.MatchString(line) { - // 如果有模型映射,替换响应中的model字段 - if needModelReplace { - line = s.replaceModelInSSELine(line, mappedModel, originalModel) - } - if rewriteTools { - line = s.replaceToolNamesInSSELine(line, toolNameMap) - } - data = sseDataRe.ReplaceAllString(line, "") - } - - // 写入客户端(统一处理 data 行和非 data 行) - if !clientDisconnected { - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { - clientDisconnected = true - log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") - } else { - flusher.Flush() - } - } - - // 无论客户端是否断开,都解析 usage(仅对 data 行) - if data != "" { - if firstTokenMs == nil && data != "[DONE]" { - ms := int(time.Since(startTime).Milliseconds()) - firstTokenMs = &ms - } - s.parseSSEUsage(data, usage) - } + pendingEventLines = append(pendingEventLines, line) case <-intervalCh: lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) From a05b8b56e3f306f5a08a15363d16a2db2c734b50 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 03:46:09 +0800 Subject: [PATCH 016/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20SSE=20?= =?UTF-8?q?=E7=BC=93=E5=86=B2=20input=5Fjson=5Fdelta=20=E5=8F=8D=E5=90=91?= =?UTF-8?q?=E8=BD=AC=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 222 ++++++++++++++++---- 1 file changed, 185 insertions(+), 37 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index fb2d40a3..8a6770c8 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3315,9 +3315,159 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } needModelReplace := originalModel != mappedModel - rewriteTools 
:= mimicClaudeCode clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage + pendingEventLines := make([]string, 0, 4) + toolInputBuffers := make(map[int]string) + + transformToolInputJSON := func(raw string) string { + raw = strings.TrimSpace(raw) + if raw == "" { + return raw + } + + var parsed any + if err := json.Unmarshal([]byte(raw), &parsed); err != nil { + return replaceToolNamesInText(raw, toolNameMap) + } + + rewritten, changed := rewriteParamKeysInValue(parsed, toolNameMap) + if changed { + if bytes, err := json.Marshal(rewritten); err == nil { + return string(bytes) + } + } + return raw + } + + processSSEEvent := func(lines []string) ([]string, string, error) { + if len(lines) == 0 { + return nil, "", nil + } + + eventName := "" + dataLine := "" + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "event:") { + eventName = strings.TrimSpace(strings.TrimPrefix(trimmed, "event:")) + continue + } + if dataLine == "" && sseDataRe.MatchString(trimmed) { + dataLine = sseDataRe.ReplaceAllString(trimmed, "") + } + } + + if eventName == "error" { + return nil, dataLine, errors.New("have error in stream") + } + + if dataLine == "" { + return []string{strings.Join(lines, "\n") + "\n\n"}, "", nil + } + + if dataLine == "[DONE]" { + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + dataLine + "\n\n" + return []string{block}, dataLine, nil + } + + var event map[string]any + if err := json.Unmarshal([]byte(dataLine), &event); err != nil { + replaced := replaceToolNamesInText(dataLine, toolNameMap) + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + replaced + "\n\n" + return []string{block}, replaced, nil + } + + eventType, _ := event["type"].(string) + if eventName == "" { + eventName = eventType + } + + if needModelReplace && eventType == "message_start" { + if msg, ok := event["message"].(map[string]any); ok { + if model, ok := msg["model"].(string); ok && model == mappedModel { + msg["model"] = originalModel + } + } + } + + if eventType == "content_block_delta" { + if delta, ok := event["delta"].(map[string]any); ok { + if deltaType, _ := delta["type"].(string); deltaType == "input_json_delta" { + if indexVal, ok := event["index"].(float64); ok { + index := int(indexVal) + if partial, ok := delta["partial_json"].(string); ok { + toolInputBuffers[index] += partial + } + } + return nil, dataLine, nil + } + } + } + + if eventType == "content_block_stop" { + if indexVal, ok := event["index"].(float64); ok { + index := int(indexVal) + if buffered := toolInputBuffers[index]; buffered != "" { + delete(toolInputBuffers, index) + + transformed := transformToolInputJSON(buffered) + synthetic := map[string]any{ + "type": "content_block_delta", + "index": index, + "delta": map[string]any{ + "type": "input_json_delta", + "partial_json": transformed, + }, + } + + synthBytes, synthErr := json.Marshal(synthetic) + if synthErr == nil { + synthBlock := "event: content_block_delta\n" + "data: " + string(synthBytes) + "\n\n" + + rewriteToolNamesInValue(event, toolNameMap) + stopBytes, stopErr := json.Marshal(event) + if stopErr == nil { + stopBlock := "" + if eventName != "" { + stopBlock = "event: " + eventName + "\n" + } + stopBlock += "data: " + string(stopBytes) + "\n\n" + return []string{synthBlock, stopBlock}, string(stopBytes), nil + } + } + } + } + } + + rewriteToolNamesInValue(event, toolNameMap) + newData, err := json.Marshal(event) + if err != nil { 
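+ // Note: if re-marshaling the rewritten event fails, we fall back to plain
+ // text replacement of tool names on the raw data line, so the client still
+ // receives a well-formed SSE block instead of a dropped event.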
+ replaced := replaceToolNamesInText(dataLine, toolNameMap) + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + replaced + "\n\n" + return []string{block}, replaced, nil + } + + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + string(newData) + "\n\n" + return []string{block}, string(newData), nil + } + for { select { case ev, ok := <-events: @@ -3346,45 +3496,43 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", ev.err) } line := ev.line - if line == "event: error" { - // 上游返回错误事件,如果客户端已断开仍返回已收集的 usage - if clientDisconnected { - return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + trimmed := strings.TrimSpace(line) + + if trimmed == "" { + if len(pendingEventLines) == 0 { + continue } - return nil, errors.New("have error in stream") + + outputBlocks, data, err := processSSEEvent(pendingEventLines) + pendingEventLines = pendingEventLines[:0] + if err != nil { + if clientDisconnected { + return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil + } + return nil, err + } + + for _, block := range outputBlocks { + if !clientDisconnected { + if _, werr := fmt.Fprint(w, block); werr != nil { + clientDisconnected = true + log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") + break + } + flusher.Flush() + } + if data != "" { + if firstTokenMs == nil && data != "[DONE]" { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + s.parseSSEUsage(data, usage) + } + } + continue } - // Extract data from SSE line (supports both "data: " and "data:" formats) - var data string - if sseDataRe.MatchString(line) { - // 如果有模型映射,替换响应中的model字段 - if needModelReplace { - line = s.replaceModelInSSELine(line, mappedModel, originalModel) - } - if rewriteTools { - line = s.replaceToolNamesInSSELine(line, toolNameMap) - } - data = sseDataRe.ReplaceAllString(line, "") - } - - // 写入客户端(统一处理 data 行和非 data 行) - if !clientDisconnected { - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { - clientDisconnected = true - log.Printf("Client disconnected during streaming, continuing to drain upstream for billing") - } else { - flusher.Flush() - } - } - - // 无论客户端是否断开,都解析 usage(仅对 data 行) - if data != "" { - if firstTokenMs == nil && data != "[DONE]" { - ms := int(time.Since(startTime).Milliseconds()) - firstTokenMs = &ms - } - s.parseSSEUsage(data, usage) - } + pendingEventLines = append(pendingEventLines, line) case <-intervalCh: lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) From 02db4c76714a48067b736496e10250ea520846be Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 03:53:08 +0800 Subject: [PATCH 017/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=B5=81=E5=BC=8F=20tool=20=E8=BE=93=E5=85=A5?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E8=BD=AC=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 57 +-------------------- 1 file changed, 1 insertion(+), 56 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 8a6770c8..749dcc21 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3391,7 
+3391,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http eventName = eventType } - if needModelReplace && eventType == "message_start" { + if needModelReplace { if msg, ok := event["message"].(map[string]any); ok { if model, ok := msg["model"].(string); ok && model == mappedModel { msg["model"] = originalModel @@ -3556,45 +3556,6 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } -// replaceModelInSSELine 替换SSE数据行中的model字段 -func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) string { - if !sseDataRe.MatchString(line) { - return line - } - data := sseDataRe.ReplaceAllString(line, "") - if data == "" || data == "[DONE]" { - return line - } - - var event map[string]any - if err := json.Unmarshal([]byte(data), &event); err != nil { - return line - } - - // 只替换 message_start 事件中的 message.model - if event["type"] != "message_start" { - return line - } - - msg, ok := event["message"].(map[string]any) - if !ok { - return line - } - - model, ok := msg["model"].(string) - if !ok || model != fromModel { - return line - } - - msg["model"] = toModel - newData, err := json.Marshal(event) - if err != nil { - return line - } - - return "data: " + string(newData) -} - func rewriteParamKeysInValue(value any, cache map[string]string) (any, bool) { switch v := value.(type) { case map[string]any: @@ -3715,22 +3676,6 @@ func replaceToolNamesInText(text string, toolNameMap map[string]string) string { return output } -func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { - if !sseDataRe.MatchString(line) { - return line - } - data := sseDataRe.ReplaceAllString(line, "") - if data == "" || data == "[DONE]" { - return line - } - - replaced := replaceToolNamesInText(data, toolNameMap) - if replaced == data { - return line - } - return "data: " + replaced -} - func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { // 解析message_start获取input tokens(标准Claude API格式) var msgStart struct { From eb7d8302967e19b0c3dee247ecbc8ca4b294e0b4 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 03:53:08 +0800 Subject: [PATCH 018/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E6=B5=81=E5=BC=8F=20tool=20=E8=BE=93=E5=85=A5?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E8=BD=AC=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 57 +-------------------- 1 file changed, 1 insertion(+), 56 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 02289a7a..10b5c169 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3536,7 +3536,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http eventName = eventType } - if needModelReplace && eventType == "message_start" { + if needModelReplace { if msg, ok := event["message"].(map[string]any); ok { if model, ok := msg["model"].(string); ok && model == mappedModel { msg["model"] = originalModel @@ -3701,45 +3701,6 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } -// replaceModelInSSELine 替换SSE数据行中的model字段 -func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) string { - if !sseDataRe.MatchString(line) { - return line - } - data := sseDataRe.ReplaceAllString(line, "") - if data == "" || data == "[DONE]" { 
- return line - } - - var event map[string]any - if err := json.Unmarshal([]byte(data), &event); err != nil { - return line - } - - // 只替换 message_start 事件中的 message.model - if event["type"] != "message_start" { - return line - } - - msg, ok := event["message"].(map[string]any) - if !ok { - return line - } - - model, ok := msg["model"].(string) - if !ok || model != fromModel { - return line - } - - msg["model"] = toModel - newData, err := json.Marshal(event) - if err != nil { - return line - } - - return "data: " + string(newData) -} - func rewriteParamKeysInValue(value any, cache map[string]string) (any, bool) { switch v := value.(type) { case map[string]any: @@ -3860,22 +3821,6 @@ func replaceToolNamesInText(text string, toolNameMap map[string]string) string { return output } -func (s *GatewayService) replaceToolNamesInSSELine(line string, toolNameMap map[string]string) string { - if !sseDataRe.MatchString(line) { - return line - } - data := sseDataRe.ReplaceAllString(line, "") - if data == "" || data == "[DONE]" { - return line - } - - replaced := replaceToolNamesInText(data, toolNameMap) - if replaced == data { - return line - } - return "data: " + replaced -} - func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { // 解析message_start获取input tokens(标准Claude API格式) var msgStart struct { From 26298c4a5fe58c43f908fb0262c05ef38422da6b Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 13:53:39 +0800 Subject: [PATCH 019/214] fix(openai): emit OpenAI-compatible SSE error events --- .../service/openai_gateway_service.go | 19 ++++++++++++++++--- .../service/openai_gateway_service_test.go | 8 ++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 87ad37a6..66ac1601 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -1067,7 +1067,9 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp // 记录上次收到上游数据的时间,用于控制 keepalive 发送频率 lastDataAt := time.Now() - // 仅发送一次错误事件,避免多次写入导致协议混乱(写失败时尽力通知客户端) + // 仅发送一次错误事件,避免多次写入导致协议混乱。 + // 注意:OpenAI `/v1/responses` streaming 事件必须符合 OpenAI Responses schema; + // 否则下游 SDK(例如 OpenCode)会因为类型校验失败而报错。 errorEventSent := false clientDisconnected := false // 客户端断开后继续 drain 上游以收集 usage sendErrorEvent := func(reason string) { @@ -1075,8 +1077,19 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp return } errorEventSent = true - _, _ = fmt.Fprintf(w, "event: error\ndata: {\"error\":\"%s\"}\n\n", reason) - flusher.Flush() + payload := map[string]any{ + "type": "error", + "sequence_number": 0, + "error": map[string]any{ + "type": "upstream_error", + "message": reason, + "code": reason, + }, + } + if b, err := json.Marshal(payload); err == nil { + _, _ = fmt.Fprintf(w, "data: %s\n\n", b) + flusher.Flush() + } } needModelReplace := originalModel != mappedModel diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 3ec37544..57b73245 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -188,8 +188,8 @@ func TestOpenAIStreamingTimeout(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "stream data interval timeout") { t.Fatalf("expected stream timeout error, got %v", err) } - if !strings.Contains(rec.Body.String(), 
"stream_timeout") { - t.Fatalf("expected stream_timeout SSE error, got %q", rec.Body.String()) + if !strings.Contains(rec.Body.String(), "\"type\":\"error\"") || !strings.Contains(rec.Body.String(), "stream_timeout") { + t.Fatalf("expected OpenAI-compatible error SSE event, got %q", rec.Body.String()) } } @@ -305,8 +305,8 @@ func TestOpenAIStreamingTooLong(t *testing.T) { if !errors.Is(err, bufio.ErrTooLong) { t.Fatalf("expected ErrTooLong, got %v", err) } - if !strings.Contains(rec.Body.String(), "response_too_large") { - t.Fatalf("expected response_too_large SSE error, got %q", rec.Body.String()) + if !strings.Contains(rec.Body.String(), "\"type\":\"error\"") || !strings.Contains(rec.Body.String(), "response_too_large") { + t.Fatalf("expected OpenAI-compatible error SSE event, got %q", rec.Body.String()) } } From bba5b3c037f198ef1fdcbb46f846ac88d841a62c Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 15:01:32 +0800 Subject: [PATCH 020/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20OAuth=20?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=E7=BB=9F=E4=B8=80=20user=5Fid=20=E4=B8=8E?= =?UTF-8?q?=E6=8C=87=E7=BA=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 10b5c169..f3a21bd6 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2904,7 +2904,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // OAuth账号:应用统一指纹 var fingerprint *Fingerprint - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { // 1. 
获取或创建指纹(包含随机生成的ClientID) fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err != nil { @@ -2957,7 +2957,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" && mimicClaudeCode { + if tokenType == "oauth" { applyClaudeOAuthHeaderDefaults(req, reqStream) } @@ -4257,7 +4257,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用统一指纹和重写 userID - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err == nil { accountUUID := account.GetExtraString("account_uuid") @@ -4292,7 +4292,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用指纹到请求头 - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if fp != nil { s.identityService.ApplyFingerprint(req, fp) @@ -4306,7 +4306,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" && mimicClaudeCode { + if tokenType == "oauth" { applyClaudeOAuthHeaderDefaults(req, false) } From 49be9d08f354a1f12c2410d57f31d3aa0adf09a0 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 15:01:32 +0800 Subject: [PATCH 021/214] =?UTF-8?q?fix(=E7=BD=91=E5=85=B3):=20OAuth=20?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=E7=BB=9F=E4=B8=80=20user=5Fid=20=E4=B8=8E?= =?UTF-8?q?=E6=8C=87=E7=BA=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/gateway_service.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 749dcc21..4bb2af95 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2759,7 +2759,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // OAuth账号:应用统一指纹 var fingerprint *Fingerprint - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { // 1. 
获取或创建指纹(包含随机生成的ClientID) fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err != nil { @@ -2812,7 +2812,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" && mimicClaudeCode { + if tokenType == "oauth" { applyClaudeOAuthHeaderDefaults(req, reqStream) } @@ -4112,7 +4112,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用统一指纹和重写 userID - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err == nil { accountUUID := account.GetExtraString("account_uuid") @@ -4147,7 +4147,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用指纹到请求头 - if account.IsOAuth() && mimicClaudeCode && s.identityService != nil { + if account.IsOAuth() && s.identityService != nil { fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if fp != nil { s.identityService.ApplyFingerprint(req, fp) @@ -4161,7 +4161,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") } - if tokenType == "oauth" && mimicClaudeCode { + if tokenType == "oauth" { applyClaudeOAuthHeaderDefaults(req, false) } From 2f2e76f9c640d45c1cc7ddd46ef70c752d38e7f4 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Mon, 19 Jan 2026 16:20:24 +0800 Subject: [PATCH 022/214] fix(gateway): gate streaming tool rewrites behind mimic --- backend/internal/service/gateway_service.go | 26 ++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index f3a21bd6..8893dac1 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3463,9 +3463,15 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http clientDisconnected := false // 客户端断开标志,断开后继续读取上游以获取完整usage pendingEventLines := make([]string, 0, 4) - toolInputBuffers := make(map[int]string) + var toolInputBuffers map[int]string + if mimicClaudeCode { + toolInputBuffers = make(map[int]string) + } transformToolInputJSON := func(raw string) string { + if !mimicClaudeCode { + return raw + } raw = strings.TrimSpace(raw) if raw == "" { return raw @@ -3522,7 +3528,10 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http var event map[string]any if err := json.Unmarshal([]byte(dataLine), &event); err != nil { - replaced := replaceToolNamesInText(dataLine, toolNameMap) + replaced := dataLine + if mimicClaudeCode { + replaced = replaceToolNamesInText(dataLine, toolNameMap) + } block := "" if eventName != "" { block = "event: " + eventName + "\n" @@ -3544,7 +3553,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } } - if eventType == "content_block_delta" { + if mimicClaudeCode && eventType == "content_block_delta" { if delta, ok := event["delta"].(map[string]any); ok { if deltaType, _ := delta["type"].(string); deltaType == "input_json_delta" { if indexVal, ok := event["index"].(float64); ok { @@ -3558,7 +3567,7 @@ func (s *GatewayService) handleStreamingResponse(ctx 
context.Context, resp *http } } - if eventType == "content_block_stop" { + if mimicClaudeCode && eventType == "content_block_stop" { if indexVal, ok := event["index"].(float64); ok { index := int(indexVal) if buffered := toolInputBuffers[index]; buffered != "" { @@ -3593,10 +3602,15 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } } - rewriteToolNamesInValue(event, toolNameMap) + if mimicClaudeCode { + rewriteToolNamesInValue(event, toolNameMap) + } newData, err := json.Marshal(event) if err != nil { - replaced := replaceToolNamesInText(dataLine, toolNameMap) + replaced := dataLine + if mimicClaudeCode { + replaced = replaceToolNamesInText(dataLine, toolNameMap) + } block := "" if eventName != "" { block = "event: " + eventName + "\n" From d1a6303e490096afd2763e1c3d9be969e0292e8c Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 00:52:27 +0800 Subject: [PATCH 023/214] =?UTF-8?q?fix(antigravity):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=20Claude=20=E9=9D=9E=E6=B5=81=E5=BC=8F=E5=93=8D=E5=BA=94?= =?UTF-8?q?=E4=B8=A2=E5=A4=B1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service/antigravity_gateway_service.go | 37 ++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 043f338d..15821207 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -2133,6 +2133,8 @@ func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Cont var firstTokenMs *int var last map[string]any var lastWithParts map[string]any + var collectedImageParts []map[string]any // 收集所有包含图片的 parts + var collectedTextParts []string // 收集所有文本片段 type scanEvent struct { line string @@ -2230,6 +2232,15 @@ func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Cont // 保留最后一个有 parts 的响应 if parts := extractGeminiParts(parsed); len(parts) > 0 { lastWithParts = parsed + // 收集包含图片和文本的 parts + for _, part := range parts { + if _, ok := part["inlineData"].(map[string]any); ok { + collectedImageParts = append(collectedImageParts, part) + } + if text, ok := part["text"].(string); ok && text != "" { + collectedTextParts = append(collectedTextParts, text) + } + } } case <-intervalCh: @@ -2252,8 +2263,32 @@ returnResponse: return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Empty response from upstream") } + // 如果收集到了图片 parts,需要合并到最终响应中 + if len(collectedImageParts) > 0 { + finalResponse = mergeImagePartsToResponse(finalResponse, collectedImageParts) + } + + // 如果收集到了文本,需要合并到最终响应中 + if len(collectedTextParts) > 0 { + finalResponse = mergeTextPartsToResponse(finalResponse, collectedTextParts) + } + + geminiPayload := finalResponse + if _, ok := finalResponse["response"]; !ok { + wrapped := map[string]any{ + "response": finalResponse, + } + if respID, ok := finalResponse["responseId"]; ok { + wrapped["responseId"] = respID + } + if modelVersion, ok := finalResponse["modelVersion"]; ok { + wrapped["modelVersion"] = modelVersion + } + geminiPayload = wrapped + } + // 序列化为 JSON(Gemini 格式) - geminiBody, err := json.Marshal(finalResponse) + geminiBody, err := json.Marshal(geminiPayload) if err != nil { return nil, fmt.Errorf("failed to marshal gemini response: %w", err) } From c43aa22cdb987e8ad9e0b328661a603d4f04a34e Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 
10:59:10 +0800 Subject: [PATCH 024/214] =?UTF-8?q?feat(antigravity):=20=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E6=8C=89=E6=98=A0=E5=B0=84=E6=A8=A1=E5=9E=8B=E8=AE=A1=E8=B4=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service/antigravity_gateway_service.go | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 15821207..045902cb 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -29,7 +29,10 @@ const ( antigravityRetryMaxDelay = 16 * time.Second ) -const antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" +const ( + antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" + antigravityBillingModelEnv = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL" +) // antigravityRetryLoopParams 重试循环的参数 type antigravityRetryLoopParams struct { @@ -711,6 +714,10 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, originalModel := claudeReq.Model mappedModel := s.getMappedModel(account, claudeReq.Model) quotaScope, _ := resolveAntigravityQuotaScope(originalModel) + billingModel := originalModel + if antigravityUseMappedModelForBilling() && strings.TrimSpace(mappedModel) != "" { + billingModel = mappedModel + } // 获取 access_token if s.tokenProvider == nil { @@ -971,7 +978,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return &ForwardResult{ RequestID: requestID, Usage: *usage, - Model: originalModel, // 使用原始模型用于计费和日志 + Model: billingModel, Stream: claudeReq.Stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, @@ -1280,6 +1287,10 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co } mappedModel := s.getMappedModel(account, originalModel) + billingModel := originalModel + if antigravityUseMappedModelForBilling() && strings.TrimSpace(mappedModel) != "" { + billingModel = mappedModel + } // 获取 access_token if s.tokenProvider == nil { @@ -1478,7 +1489,7 @@ handleSuccess: return &ForwardResult{ RequestID: requestID, Usage: *usage, - Model: originalModel, + Model: billingModel, Stream: stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, @@ -1525,6 +1536,11 @@ func antigravityUseScopeRateLimit() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func antigravityUseMappedModelForBilling() bool { + v := strings.ToLower(strings.TrimSpace(os.Getenv(antigravityBillingModelEnv))) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { // 429 使用 Gemini 格式解析(从 body 解析重置时间) if statusCode == 429 { From 86d63f919d498e6118c06bb8c34cf05c904f415f Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 11:38:40 +0800 Subject: [PATCH 025/214] =?UTF-8?q?feat(antigravity):=20=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E7=A7=92=E7=BA=A7=20fallback=20=E5=86=B7=E5=8D=B4=E6=97=B6?= =?UTF-8?q?=E9=97=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service/antigravity_gateway_service.go | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go 
b/backend/internal/service/antigravity_gateway_service.go index 045902cb..54f4b5e6 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -13,6 +13,7 @@ import ( "net" "net/http" "os" + "strconv" "strings" "sync/atomic" "time" @@ -30,8 +31,9 @@ const ( ) const ( - antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" - antigravityBillingModelEnv = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL" + antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" + antigravityBillingModelEnv = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL" + antigravityFallbackSecondsEnv = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS" ) // antigravityRetryLoopParams 重试循环的参数 @@ -1541,6 +1543,18 @@ func antigravityUseMappedModelForBilling() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func antigravityFallbackCooldownSeconds() (time.Duration, bool) { + raw := strings.TrimSpace(os.Getenv(antigravityFallbackSecondsEnv)) + if raw == "" { + return 0, false + } + seconds, err := strconv.Atoi(raw) + if err != nil || seconds <= 0 { + return 0, false + } + return time.Duration(seconds) * time.Second, true +} + func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { // 429 使用 Gemini 格式解析(从 body 解析重置时间) if statusCode == 429 { @@ -1553,6 +1567,9 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre fallbackMinutes = s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes } defaultDur := time.Duration(fallbackMinutes) * time.Minute + if override, ok := antigravityFallbackCooldownSeconds(); ok { + defaultDur = override + } ra := time.Now().Add(defaultDur) if useScopeLimit { log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) From 64795a03e387f18d7e34d3e8c32fe055a7a79670 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 14:17:10 +0800 Subject: [PATCH 026/214] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E8=B4=A6=E5=8F=B7?= =?UTF-8?q?=E5=87=AD=E8=AF=81=E9=82=AE=E7=AE=B1=E6=9F=A5=E8=AF=A2=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../internal/handler/admin/account_handler.go | 88 +++++++++++++++++++ backend/internal/repository/account_repo.go | 31 +++++++ backend/internal/server/api_contract_test.go | 75 +++++++++++++++- backend/internal/server/routes/admin.go | 1 + backend/internal/service/account_service.go | 1 + .../service/account_service_delete_test.go | 4 + backend/internal/service/admin_service.go | 8 ++ .../service/gateway_multiplatform_test.go | 3 + .../service/gemini_multiplatform_test.go | 3 + 9 files changed, 213 insertions(+), 1 deletion(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 16fc86bb..579ada14 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -129,6 +129,13 @@ type BulkUpdateAccountsRequest struct { ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // 用户确认混合渠道风险 } +// AccountLookupRequest 用于凭证身份信息查找账号 +type AccountLookupRequest struct { + Platform string `json:"platform" binding:"required"` + Emails []string `json:"emails" binding:"required,min=1"` + IdentityType string `json:"identity_type"` +} + // AccountWithConcurrency 
extends Account with real-time concurrency info type AccountWithConcurrency struct { *dto.Account @@ -258,6 +265,87 @@ func (h *AccountHandler) List(c *gin.Context) { response.Paginated(c, result, total, page, pageSize) } +// Lookup 根据凭证身份信息查找账号 +// POST /api/v1/admin/accounts/lookup +func (h *AccountHandler) Lookup(c *gin.Context) { + var req AccountLookupRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + identityType := strings.TrimSpace(req.IdentityType) + if identityType == "" { + identityType = "credential_email" + } + if identityType != "credential_email" { + response.BadRequest(c, "Unsupported identity_type") + return + } + + platform := strings.TrimSpace(req.Platform) + if platform == "" { + response.BadRequest(c, "Platform is required") + return + } + + normalized := make([]string, 0, len(req.Emails)) + seen := make(map[string]struct{}) + for _, email := range req.Emails { + cleaned := strings.ToLower(strings.TrimSpace(email)) + if cleaned == "" { + continue + } + if _, ok := seen[cleaned]; ok { + continue + } + seen[cleaned] = struct{}{} + normalized = append(normalized, cleaned) + } + if len(normalized) == 0 { + response.BadRequest(c, "Emails is required") + return + } + + accounts, err := h.adminService.LookupAccountsByCredentialEmail(c.Request.Context(), platform, normalized) + if err != nil { + response.ErrorFrom(c, err) + return + } + + matchedMap := make(map[string]service.Account) + for _, account := range accounts { + email := strings.ToLower(strings.TrimSpace(account.GetCredential("email"))) + if email == "" { + continue + } + if _, ok := matchedMap[email]; ok { + continue + } + matchedMap[email] = account + } + + matched := make([]gin.H, 0, len(matchedMap)) + missing := make([]string, 0) + for _, email := range normalized { + if account, ok := matchedMap[email]; ok { + matched = append(matched, gin.H{ + "email": email, + "account_id": account.ID, + "platform": account.Platform, + "name": account.Name, + }) + continue + } + missing = append(missing, email) + } + + response.Success(c, gin.H{ + "matched": matched, + "missing": missing, + }) +} + // GetByID handles getting an account by ID // GET /api/v1/admin/accounts/:id func (h *AccountHandler) GetByID(c *gin.Context) { diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 8aa487ac..6fcb5290 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -473,6 +473,37 @@ func (r *accountRepository) ListByPlatform(ctx context.Context, platform string) return r.accountsToService(ctx, accounts) } +func (r *accountRepository) ListByPlatformAndCredentialEmails( + ctx context.Context, + platform string, + emails []string, +) ([]service.Account, error) { + if len(emails) == 0 { + return []service.Account{}, nil + } + args := make([]any, 0, len(emails)) + for _, email := range emails { + if email == "" { + continue + } + args = append(args, email) + } + if len(args) == 0 { + return []service.Account{}, nil + } + + accounts, err := r.client.Account.Query(). + Where(dbaccount.PlatformEQ(platform)). + Where(func(s *entsql.Selector) { + s.Where(sqljson.ValueIn(dbaccount.FieldCredentials, args, sqljson.Path("email"))) + }). 
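+ // Note: sqljson.ValueIn with sqljson.Path("email") pushes the email match
+ // into SQL (roughly credentials->>'email' IN (...) on Postgres), so rows
+ // are filtered upstream instead of decoding every credentials blob in Go.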
+ All(ctx) + if err != nil { + return nil, err + } + return r.accountsToService(ctx, accounts) +} + func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error { now := time.Now() _, err := r.client.Account.Update(). diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 80f3e408..81b94305 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -11,6 +11,7 @@ import ( "net/http" "net/http/httptest" "sort" + "strings" "testing" "time" @@ -341,6 +342,44 @@ func TestAPIContracts(t *testing.T) { } }`, }, + { + name: "POST /api/v1/admin/accounts/lookup", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + deps.accountRepo.lookupAccounts = []service.Account{ + { + ID: 101, + Name: "Alice Account", + Platform: "antigravity", + Credentials: map[string]any{ + "email": "alice@example.com", + }, + }, + } + }, + method: http.MethodPost, + path: "/api/v1/admin/accounts/lookup", + body: `{"platform":"antigravity","emails":["Alice@Example.com","bob@example.com"]}`, + headers: map[string]string{ + "Content-Type": "application/json", + }, + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "matched": [ + { + "email": "alice@example.com", + "account_id": 101, + "platform": "antigravity", + "name": "Alice Account" + } + ], + "missing": ["bob@example.com"] + } + }`, + }, { name: "POST /api/v1/admin/accounts/bulk-update", method: http.MethodPost, @@ -387,6 +426,7 @@ type contractDeps struct { apiKeyRepo *stubApiKeyRepo usageRepo *stubUsageLogRepo settingRepo *stubSettingRepo + accountRepo *stubAccountRepo } func newContractDeps(t *testing.T) *contractDeps { @@ -482,6 +522,7 @@ func newContractDeps(t *testing.T) *contractDeps { v1Admin.Use(adminAuth) v1Admin.GET("/settings", adminSettingHandler.GetSettings) v1Admin.POST("/accounts/bulk-update", adminAccountHandler.BulkUpdate) + v1Admin.POST("/accounts/lookup", adminAccountHandler.Lookup) return &contractDeps{ now: now, @@ -489,6 +530,7 @@ func newContractDeps(t *testing.T) *contractDeps { apiKeyRepo: apiKeyRepo, usageRepo: usageRepo, settingRepo: settingRepo, + accountRepo: &accountRepo, } } @@ -673,7 +715,8 @@ func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID i } type stubAccountRepo struct { - bulkUpdateIDs []int64 + bulkUpdateIDs []int64 + lookupAccounts []service.Account } func (s *stubAccountRepo) Create(ctx context.Context, account *service.Account) error { @@ -724,6 +767,36 @@ func (s *stubAccountRepo) ListByPlatform(ctx context.Context, platform string) ( return nil, errors.New("not implemented") } +func (s *stubAccountRepo) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]service.Account, error) { + if len(s.lookupAccounts) == 0 { + return nil, nil + } + emailSet := make(map[string]struct{}, len(emails)) + for _, email := range emails { + normalized := strings.ToLower(strings.TrimSpace(email)) + if normalized == "" { + continue + } + emailSet[normalized] = struct{}{} + } + var matches []service.Account + for i := range s.lookupAccounts { + account := &s.lookupAccounts[i] + if account.Platform != platform { + continue + } + accountEmail := strings.ToLower(strings.TrimSpace(account.GetCredential("email"))) + if accountEmail == "" { + continue + } + if _, ok := emailSet[accountEmail]; !ok { + continue + } + matches = append(matches, *account) + } + return matches, nil +} + func (s *stubAccountRepo) 
UpdateLastUsed(ctx context.Context, id int64) error { return errors.New("not implemented") } diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index ff05b32a..cf6fa942 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -197,6 +197,7 @@ func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) { accounts := admin.Group("/accounts") { accounts.GET("", h.Admin.Account.List) + accounts.POST("/lookup", h.Admin.Account.Lookup) accounts.GET("/:id", h.Admin.Account.GetByID) accounts.POST("", h.Admin.Account.Create) accounts.POST("/sync/crs", h.Admin.Account.SyncFromCRS) diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index 90365d2f..72c5c5f8 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -33,6 +33,7 @@ type AccountRepository interface { ListByGroup(ctx context.Context, groupID int64) ([]Account, error) ListActive(ctx context.Context) ([]Account, error) ListByPlatform(ctx context.Context, platform string) ([]Account, error) + ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) UpdateLastUsed(ctx context.Context, id int64) error BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index e5eabfc6..08b0d5b6 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -87,6 +87,10 @@ func (s *accountRepoStub) ListByPlatform(ctx context.Context, platform string) ( panic("unexpected ListByPlatform call") } +func (s *accountRepoStub) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) { + panic("unexpected ListByPlatformAndCredentialEmails call") +} + func (s *accountRepoStub) UpdateLastUsed(ctx context.Context, id int64) error { panic("unexpected UpdateLastUsed call") } diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 0afa0716..1b2c7ff4 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -40,6 +40,7 @@ type AdminService interface { CreateAccount(ctx context.Context, input *CreateAccountInput) (*Account, error) UpdateAccount(ctx context.Context, id int64, input *UpdateAccountInput) (*Account, error) DeleteAccount(ctx context.Context, id int64) error + LookupAccountsByCredentialEmail(ctx context.Context, platform string, emails []string) ([]Account, error) RefreshAccountCredentials(ctx context.Context, id int64) (*Account, error) ClearAccountError(ctx context.Context, id int64) (*Account, error) SetAccountError(ctx context.Context, id int64, errorMsg string) error @@ -793,6 +794,13 @@ func (s *adminServiceImpl) GetAccount(ctx context.Context, id int64) (*Account, return s.accountRepo.GetByID(ctx, id) } +func (s *adminServiceImpl) LookupAccountsByCredentialEmail(ctx context.Context, platform string, emails []string) ([]Account, error) { + if platform == "" || len(emails) == 0 { + return []Account{}, nil + } + return s.accountRepo.ListByPlatformAndCredentialEmails(ctx, platform, emails) +} + func (s *adminServiceImpl) GetAccountsByIDs(ctx context.Context, ids []int64) ([]*Account, error) { if len(ids) == 0 { return []*Account{}, nil diff --git 
a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 4d17d5e1..ccae80fe 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -96,6 +96,9 @@ func (m *mockAccountRepoForPlatform) ListActive(ctx context.Context) ([]Account, func (m *mockAccountRepoForPlatform) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { return nil, nil } +func (m *mockAccountRepoForPlatform) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) { + return nil, nil +} func (m *mockAccountRepoForPlatform) UpdateLastUsed(ctx context.Context, id int64) error { return nil } diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index fc009873..20640b01 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -81,6 +81,9 @@ func (m *mockAccountRepoForGemini) ListActive(ctx context.Context) ([]Account, e func (m *mockAccountRepoForGemini) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { return nil, nil } +func (m *mockAccountRepoForGemini) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) { + return nil, nil +} func (m *mockAccountRepoForGemini) UpdateLastUsed(ctx context.Context, id int64) error { return nil } func (m *mockAccountRepoForGemini) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { return nil From d206721fc1134ed69c603b0539658eff3e2858e7 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 19:12:19 +0800 Subject: [PATCH 027/214] feat: make antigravity max retries configurable --- .../service/antigravity_gateway_service.go | 38 +++++++++++++------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 54f4b5e6..dc381db7 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -24,13 +24,14 @@ import ( ) const ( - antigravityStickySessionTTL = time.Hour - antigravityMaxRetries = 3 - antigravityRetryBaseDelay = 1 * time.Second - antigravityRetryMaxDelay = 16 * time.Second + antigravityStickySessionTTL = time.Hour + antigravityDefaultMaxRetries = 5 + antigravityRetryBaseDelay = 1 * time.Second + antigravityRetryMaxDelay = 16 * time.Second ) const ( + antigravityMaxRetriesEnv = "GATEWAY_ANTIGRAVITY_MAX_RETRIES" antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" antigravityBillingModelEnv = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL" antigravityFallbackSecondsEnv = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS" @@ -63,6 +64,7 @@ func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopRe if len(availableURLs) == 0 { availableURLs = antigravity.BaseURLs } + maxRetries := antigravityMaxRetries() var resp *http.Response var usedBaseURL string @@ -81,7 +83,7 @@ func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopRe urlFallbackLoop: for urlIdx, baseURL := range availableURLs { usedBaseURL = baseURL - for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { + for attempt := 1; attempt <= maxRetries; attempt++ { select { case <-p.ctx.Done(): log.Printf("%s status=context_canceled 
error=%v", p.prefix, p.ctx.Err()) @@ -114,8 +116,8 @@ urlFallbackLoop: log.Printf("%s URL fallback (connection error): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) continue urlFallbackLoop } - if attempt < antigravityMaxRetries { - log.Printf("%s status=request_failed retry=%d/%d error=%v", p.prefix, attempt, antigravityMaxRetries, err) + if attempt < maxRetries { + log.Printf("%s status=request_failed retry=%d/%d error=%v", p.prefix, attempt, maxRetries, err) if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { log.Printf("%s status=context_canceled_during_backoff", p.prefix) return nil, p.ctx.Err() @@ -138,8 +140,8 @@ urlFallbackLoop: continue urlFallbackLoop } - // 账户/模型配额限流,重试 3 次(指数退避) - if attempt < antigravityMaxRetries { + // 账户/模型配额限流,按最大重试次数做指数退避 + if attempt < maxRetries { upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ @@ -152,7 +154,7 @@ urlFallbackLoop: Message: upstreamMsg, Detail: getUpstreamDetail(respBody), }) - log.Printf("%s status=429 retry=%d/%d body=%s", p.prefix, attempt, antigravityMaxRetries, truncateForLog(respBody, 200)) + log.Printf("%s status=429 retry=%d/%d body=%s", p.prefix, attempt, maxRetries, truncateForLog(respBody, 200)) if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { log.Printf("%s status=context_canceled_during_backoff", p.prefix) return nil, p.ctx.Err() @@ -176,7 +178,7 @@ urlFallbackLoop: respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) _ = resp.Body.Close() - if attempt < antigravityMaxRetries { + if attempt < maxRetries { upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ @@ -189,7 +191,7 @@ urlFallbackLoop: Message: upstreamMsg, Detail: getUpstreamDetail(respBody), }) - log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) + log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, maxRetries, truncateForLog(respBody, 500)) if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { log.Printf("%s status=context_canceled_during_backoff", p.prefix) return nil, p.ctx.Err() @@ -1538,6 +1540,18 @@ func antigravityUseScopeRateLimit() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func antigravityMaxRetries() int { + raw := strings.TrimSpace(os.Getenv(antigravityMaxRetriesEnv)) + if raw == "" { + return antigravityDefaultMaxRetries + } + value, err := strconv.Atoi(raw) + if err != nil || value <= 0 { + return antigravityDefaultMaxRetries + } + return value +} + func antigravityUseMappedModelForBilling() bool { v := strings.ToLower(strings.TrimSpace(os.Getenv(antigravityBillingModelEnv))) return v == "1" || v == "true" || v == "yes" || v == "on" From 549c134bb855d77a60d71f658517a61228ef75e4 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 20 Jan 2026 19:16:43 +0800 Subject: [PATCH 028/214] chore: gofmt antigravity gateway service --- backend/internal/service/antigravity_gateway_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index dc381db7..1a239484 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -31,7 +31,7 @@ 
 const (
 )
 
 const (
-	antigravityMaxRetriesEnv     = "GATEWAY_ANTIGRAVITY_MAX_RETRIES"
+	antigravityMaxRetriesEnv      = "GATEWAY_ANTIGRAVITY_MAX_RETRIES"
 	antigravityScopeRateLimitEnv  = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT"
 	antigravityBillingModelEnv    = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL"
 	antigravityFallbackSecondsEnv = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS"

From 3a31fa476855bf78fd2da46f2644518f403ccfc6 Mon Sep 17 00:00:00 2001
From: song
Date: Wed, 21 Jan 2026 11:50:38 +0800
Subject: [PATCH 029/214] =?UTF-8?q?fix:=20429=20=E9=99=90=E6=B5=81?=
 =?UTF-8?q?=E6=97=B6=E6=9B=B4=E6=96=B0=E8=B4=A6=E5=8F=B7=20last=5Fused=5Fa?=
 =?UTF-8?q?t?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When setting the rate-limit marker, also update last_used_at so that an
account that has just triggered a 429 is deprioritized in subsequent
scheduling, giving other accounts a better chance of being selected.
---
 backend/internal/repository/account_repo.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go
index 6fcb5290..440223eb 100644
--- a/backend/internal/repository/account_repo.go
+++ b/backend/internal/repository/account_repo.go
@@ -787,6 +787,7 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA
 		Where(dbaccount.IDEQ(id)).
 		SetRateLimitedAt(now).
 		SetRateLimitResetAt(resetAt).
+		SetLastUsedAt(now).
 		Save(ctx)
 	if err != nil {
 		return err
@@ -812,7 +813,7 @@ func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, i
 	client := clientFromContext(ctx, r.client)
 	result, err := client.ExecContext(
 		ctx,
-		"UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL",
+		"UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW(), last_used_at = NOW() WHERE id = $3 AND deleted_at IS NULL",
 		path,
 		raw,
 		id,
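The scheduling effect this patch relies on is simple: if account selection favors the least-recently-used candidate, stamping a 429'd account as "just used" pushes it to the back of the rotation for the duration of its cooldown, with no extra scheduler state. A minimal sketch of that ordering, using hypothetical trimmed-down types (the real selection logic lives in the service layer and weighs more than this one column):

package main

import (
	"fmt"
	"sort"
	"time"
)

// account is a hypothetical, trimmed-down view of what the scheduler sees.
type account struct {
	Name       string
	LastUsedAt time.Time
}

// pickNext prefers the least-recently-used account. Because SetRateLimited
// now also stamps last_used_at = NOW(), a freshly rate-limited account
// sorts to the end and is naturally skipped until others have been used.
func pickNext(accounts []account) account {
	sort.Slice(accounts, func(i, j int) bool {
		return accounts[i].LastUsedAt.Before(accounts[j].LastUsedAt)
	})
	return accounts[0]
}

func main() {
	now := time.Now()
	accts := []account{
		{Name: "idle-account", LastUsedAt: now.Add(-10 * time.Minute)},
		{Name: "limited-account", LastUsedAt: now}, // just got a 429, stamped "now"
	}
	fmt.Println(pickNext(accts).Name) // idle-account
}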
From 71f8b9e47341430bc14f4ce06176f5e35af68099 Mon Sep 17 00:00:00 2001
From: 0xff26b9a8 <25315788+0xff26b9a8@users.noreply.github.com>
Date: Tue, 20 Jan 2026 23:41:53 +0800
Subject: [PATCH 030/214] =?UTF-8?q?refactor(antigravity):=20=E6=8F=90?=
 =?UTF-8?q?=E5=8F=96=E5=B9=B6=E5=90=8C=E6=AD=A5=20Schema=20=E6=B8=85?=
 =?UTF-8?q?=E7=90=86=E9=80=BB=E8=BE=91=E8=87=B3=20schema=5Fcleaner.go?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Main changes:

1. Code restructuring:
   - Extract CleanJSONSchema and its related helpers from
     request_transformer.go into a standalone schema_cleaner.go file,
     decoupling the logic.

2. Logic fixes and improvements:
   - Correct the schema-cleaning strategy by following the
     Antigravity-Manager (json_schema.rs) implementation.
---
 .../pkg/antigravity/request_transformer.go    | 242 +-------
 .../pkg/antigravity/response_transformer.go   |   9 +
 .../pkg/antigravity/schema_cleaner.go         | 526 ++++++++++++++++++
 .../pkg/antigravity/stream_transformer.go     |   9 +
 .../service/antigravity_gateway_service.go    |  87 +++
 5 files changed, 636 insertions(+), 237 deletions(-)
 create mode 100644 backend/internal/pkg/antigravity/schema_cleaner.go

diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go
index 637a4ea8..1b21bd58 100644
--- a/backend/internal/pkg/antigravity/request_transformer.go
+++ b/backend/internal/pkg/antigravity/request_transformer.go
@@ -7,13 +7,11 @@ import (
 	"fmt"
 	"log"
 	"math/rand"
-	"os"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/gin-gonic/gin"
 	"github.com/google/uuid"
 )
 
@@ -594,11 +592,14 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration {
 	}
 
 	// 清理 JSON Schema
-	params := cleanJSONSchema(inputSchema)
+	// 1. 深度清理 [undefined] 值
+	DeepCleanUndefined(inputSchema)
+	// 2. 转换为符合 Gemini v1internal 的 schema
+	params := CleanJSONSchema(inputSchema)
 	// 为 nil schema 提供默认值
 	if params == nil {
 		params = map[string]any{
-			"type":       "OBJECT",
+			"type":       "object", // lowercase type
 			"properties": map[string]any{},
 		}
 	}
@@ -631,236 +632,3 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration {
 		FunctionDeclarations: funcDecls,
 	}}
 }
-
-// cleanJSONSchema 清理 JSON Schema,移除 Antigravity/Gemini 不支持的字段
-// 参考 proxycast 的实现,确保 schema 符合 JSON Schema draft 2020-12
-func cleanJSONSchema(schema map[string]any) map[string]any {
-	if schema == nil {
-		return nil
-	}
-	cleaned := cleanSchemaValue(schema, "$")
-	result, ok := cleaned.(map[string]any)
-	if !ok {
-		return nil
-	}
-
-	// 确保有 type 字段(默认 OBJECT)
-	if _, hasType := result["type"]; !hasType {
-		result["type"] = "OBJECT"
-	}
-
-	// 确保有 properties 字段(默认空对象)
-	if _, hasProps := result["properties"]; !hasProps {
-		result["properties"] = make(map[string]any)
-	}
-
-	// 验证 required 中的字段都存在于 properties 中
-	if required, ok := result["required"].([]any); ok {
-		if props, ok := result["properties"].(map[string]any); ok {
-			validRequired := make([]any, 0, len(required))
-			for _, r := range required {
-				if reqName, ok := r.(string); ok {
-					if _, exists := props[reqName]; exists {
-						validRequired = append(validRequired, r)
-					}
-				}
-			}
-			if len(validRequired) > 0 {
-				result["required"] = validRequired
-			} else {
-				delete(result, "required")
-			}
-		}
-	}
-
-	return result
-}
-
-var schemaValidationKeys = map[string]bool{
-	"minLength":         true,
-	"maxLength":         true,
-	"pattern":           true,
-	"minimum":           true,
-	"maximum":           true,
-	"exclusiveMinimum":  true,
-	"exclusiveMaximum":  true,
-	"multipleOf":        true,
-	"uniqueItems":       true,
-	"minItems":          true,
-	"maxItems":          true,
-	"minProperties":     true,
-	"maxProperties":     true,
-	"patternProperties": true,
-	"propertyNames":     true,
-	"dependencies":      true,
-	"dependentSchemas":  true,
-	"dependentRequired": true,
-}
-
-var warnedSchemaKeys sync.Map
-
-func schemaCleaningWarningsEnabled() bool {
-	// 可通过环境变量强制开关,方便排查:SUB2API_SCHEMA_CLEAN_WARN=true/false
-	if v := strings.TrimSpace(os.Getenv("SUB2API_SCHEMA_CLEAN_WARN")); v != "" {
-		switch strings.ToLower(v) {
-		case "1", "true", "yes", "on":
-			return true
-		case "0", "false", "no", "off":
-			return false
-		}
-	}
-	// 默认:非 release 模式下输出(debug/test)
-	return gin.Mode() != gin.ReleaseMode
-}
-
-func warnSchemaKeyRemovedOnce(key, path string) {
-	if 
!schemaCleaningWarningsEnabled() { - return - } - if !schemaValidationKeys[key] { - return - } - if _, loaded := warnedSchemaKeys.LoadOrStore(key, struct{}{}); loaded { - return - } - log.Printf("[SchemaClean] removed unsupported JSON Schema validation field key=%q path=%q", key, path) -} - -// excludedSchemaKeys 不支持的 schema 字段 -// 基于 Claude API (Vertex AI) 的实际支持情况 -// 支持: type, description, enum, properties, required, additionalProperties, items -// 不支持: minItems, maxItems, minLength, maxLength, pattern, minimum, maximum 等验证字段 -var excludedSchemaKeys = map[string]bool{ - // 元 schema 字段 - "$schema": true, - "$id": true, - "$ref": true, - - // 字符串验证(Gemini 不支持) - "minLength": true, - "maxLength": true, - "pattern": true, - - // 数字验证(Claude API 通过 Vertex AI 不支持这些字段) - "minimum": true, - "maximum": true, - "exclusiveMinimum": true, - "exclusiveMaximum": true, - "multipleOf": true, - - // 数组验证(Claude API 通过 Vertex AI 不支持这些字段) - "uniqueItems": true, - "minItems": true, - "maxItems": true, - - // 组合 schema(Gemini 不支持) - "oneOf": true, - "anyOf": true, - "allOf": true, - "not": true, - "if": true, - "then": true, - "else": true, - "$defs": true, - "definitions": true, - - // 对象验证(仅保留 properties/required/additionalProperties) - "minProperties": true, - "maxProperties": true, - "patternProperties": true, - "propertyNames": true, - "dependencies": true, - "dependentSchemas": true, - "dependentRequired": true, - - // 其他不支持的字段 - "default": true, - "const": true, - "examples": true, - "deprecated": true, - "readOnly": true, - "writeOnly": true, - "contentMediaType": true, - "contentEncoding": true, - - // Claude 特有字段 - "strict": true, -} - -// cleanSchemaValue 递归清理 schema 值 -func cleanSchemaValue(value any, path string) any { - switch v := value.(type) { - case map[string]any: - result := make(map[string]any) - for k, val := range v { - // 跳过不支持的字段 - if excludedSchemaKeys[k] { - warnSchemaKeyRemovedOnce(k, path) - continue - } - - // 特殊处理 type 字段 - if k == "type" { - result[k] = cleanTypeValue(val) - continue - } - - // 特殊处理 format 字段:只保留 Gemini 支持的 format 值 - if k == "format" { - if formatStr, ok := val.(string); ok { - // Gemini 只支持 date-time, date, time - if formatStr == "date-time" || formatStr == "date" || formatStr == "time" { - result[k] = val - } - // 其他 format 值直接跳过 - } - continue - } - - // 特殊处理 additionalProperties:Claude API 只支持布尔值,不支持 schema 对象 - if k == "additionalProperties" { - if boolVal, ok := val.(bool); ok { - result[k] = boolVal - } else { - // 如果是 schema 对象,转换为 false(更安全的默认值) - result[k] = false - } - continue - } - - // 递归清理所有值 - result[k] = cleanSchemaValue(val, path+"."+k) - } - return result - - case []any: - // 递归处理数组中的每个元素 - cleaned := make([]any, 0, len(v)) - for i, item := range v { - cleaned = append(cleaned, cleanSchemaValue(item, fmt.Sprintf("%s[%d]", path, i))) - } - return cleaned - - default: - return value - } -} - -// cleanTypeValue 处理 type 字段,转换为大写 -func cleanTypeValue(value any) any { - switch v := value.(type) { - case string: - return strings.ToUpper(v) - case []any: - // 联合类型 ["string", "null"] -> 取第一个非 null 类型 - for _, t := range v { - if ts, ok := t.(string); ok && ts != "null" { - return strings.ToUpper(ts) - } - } - // 如果只有 null,返回 STRING - return "STRING" - default: - return value - } -} diff --git a/backend/internal/pkg/antigravity/response_transformer.go b/backend/internal/pkg/antigravity/response_transformer.go index 04424c03..a605fee2 100644 --- a/backend/internal/pkg/antigravity/response_transformer.go +++ 
b/backend/internal/pkg/antigravity/response_transformer.go @@ -3,6 +3,7 @@ package antigravity import ( "encoding/json" "fmt" + "log" "strings" ) @@ -242,6 +243,14 @@ func (p *NonStreamingProcessor) buildResponse(geminiResp *GeminiResponse, respon var finishReason string if len(geminiResp.Candidates) > 0 { finishReason = geminiResp.Candidates[0].FinishReason + if finishReason == "MALFORMED_FUNCTION_CALL" { + log.Printf("[Antigravity] MALFORMED_FUNCTION_CALL detected in response for model %s", originalModel) + if geminiResp.Candidates[0].Content != nil { + if b, err := json.Marshal(geminiResp.Candidates[0].Content); err == nil { + log.Printf("[Antigravity] Malformed content: %s", string(b)) + } + } + } } stopReason := "end_turn" diff --git a/backend/internal/pkg/antigravity/schema_cleaner.go b/backend/internal/pkg/antigravity/schema_cleaner.go new file mode 100644 index 00000000..1a06b2c8 --- /dev/null +++ b/backend/internal/pkg/antigravity/schema_cleaner.go @@ -0,0 +1,526 @@ +package antigravity + +import ( + "fmt" + "strings" +) + +// CleanJSONSchema 清理 JSON Schema,移除 Antigravity/Gemini 不支持的字段 +// 参考 Antigravity-Manager/src-tauri/src/proxy/common/json_schema.rs 实现 +// 确保 schema 符合 JSON Schema draft 2020-12 且适配 Gemini v1internal +func CleanJSONSchema(schema map[string]any) map[string]any { + if schema == nil { + return nil + } + // 0. 预处理:展开 $ref (Schema Flattening) + // (Go map 是引用的,直接修改 schema) + flattenRefs(schema, extractDefs(schema)) + + // 递归清理 + cleaned := cleanJSONSchemaRecursive(schema) + result, ok := cleaned.(map[string]any) + if !ok { + return nil + } + + return result +} + +// extractDefs 提取并移除定义的 helper +func extractDefs(schema map[string]any) map[string]any { + defs := make(map[string]any) + if d, ok := schema["$defs"].(map[string]any); ok { + for k, v := range d { + defs[k] = v + } + delete(schema, "$defs") + } + if d, ok := schema["definitions"].(map[string]any); ok { + for k, v := range d { + defs[k] = v + } + delete(schema, "definitions") + } + return defs +} + +// flattenRefs 递归展开 $ref +func flattenRefs(schema map[string]any, defs map[string]any) { + if len(defs) == 0 { + return // 无需展开 + } + + // 检查并替换 $ref + if ref, ok := schema["$ref"].(string); ok { + delete(schema, "$ref") + // 解析引用名 (例如 #/$defs/MyType -> MyType) + parts := strings.Split(ref, "/") + refName := parts[len(parts)-1] + + if defSchema, exists := defs[refName]; exists { + if defMap, ok := defSchema.(map[string]any); ok { + // 合并定义内容 (不覆盖现有 key) + for k, v := range defMap { + if _, has := schema[k]; !has { + schema[k] = deepCopy(v) // 需深拷贝避免共享引用 + } + } + // 递归处理刚刚合并进来的内容 + flattenRefs(schema, defs) + } + } + } + + // 遍历子节点 + for _, v := range schema { + if subMap, ok := v.(map[string]any); ok { + flattenRefs(subMap, defs) + } else if subArr, ok := v.([]any); ok { + for _, item := range subArr { + if itemMap, ok := item.(map[string]any); ok { + flattenRefs(itemMap, defs) + } + } + } + } +} + +// deepCopy 深拷贝 (简单实现,仅针对 JSON 类型) +func deepCopy(src any) any { + if src == nil { + return nil + } + switch v := src.(type) { + case map[string]any: + dst := make(map[string]any) + for k, val := range v { + dst[k] = deepCopy(val) + } + return dst + case []any: + dst := make([]any, len(v)) + for i, val := range v { + dst[i] = deepCopy(val) + } + return dst + default: + return src + } +} + +// cleanJSONSchemaRecursive 递归核心清理逻辑 +// 返回处理后的值 (通常是 input map,但可能修改内部结构) +func cleanJSONSchemaRecursive(value any) any { + schemaMap, ok := value.(map[string]any) + if !ok { + return value + } + + // 0. 
[NEW] 合并 allOf + mergeAllOf(schemaMap) + + // 1. [CRITICAL] 深度递归处理子项 + if props, ok := schemaMap["properties"].(map[string]any); ok { + for _, v := range props { + cleanJSONSchemaRecursive(v) + } + // Go 中不需要像 Rust 那样显式处理 nullable_keys remove required, + // 因为我们在子项处理中会正确设置 type 和 description + } else if items, ok := schemaMap["items"]; ok { + // [FIX] Gemini 期望 "items" 是单个 Schema 对象(列表验证),而不是数组(元组验证)。 + if itemsArr, ok := items.([]any); ok { + // 策略:将元组 [A, B] 视为 A、B 中的最佳匹配项。 + best := extractBestSchemaFromUnion(itemsArr) + if best == nil { + // 回退到通用字符串 + best = map[string]any{"type": "string"} + } + // 用处理后的对象替换原有数组 + cleanedBest := cleanJSONSchemaRecursive(best) + schemaMap["items"] = cleanedBest + } else { + cleanJSONSchemaRecursive(items) + } + } else { + // 遍历所有值递归 + for _, v := range schemaMap { + if _, isMap := v.(map[string]any); isMap { + cleanJSONSchemaRecursive(v) + } else if _, isArr := v.([]any); isArr { + // 数组内的对象也要递归 + } + } + // 稍微补全一下数组递归 + for k, v := range schemaMap { + if arr, ok := v.([]any); ok { + for _, item := range arr { + cleanJSONSchemaRecursive(item) + } + schemaMap[k] = arr + } + } + } + + // 2. [FIX] 处理 anyOf/oneOf 联合类型: 合并属性而非直接删除 + var unionArray []any + typeStr, _ := schemaMap["type"].(string) + if typeStr == "" || typeStr == "object" { + if anyOf, ok := schemaMap["anyOf"].([]any); ok { + unionArray = anyOf + } else if oneOf, ok := schemaMap["oneOf"].([]any); ok { + unionArray = oneOf + } + } + + if len(unionArray) > 0 { + if bestBranch := extractBestSchemaFromUnion(unionArray); bestBranch != nil { + if bestMap, ok := bestBranch.(map[string]any); ok { + // 合并分支内容 + for k, v := range bestMap { + if k == "properties" { + targetProps, _ := schemaMap["properties"].(map[string]any) + if targetProps == nil { + targetProps = make(map[string]any) + schemaMap["properties"] = targetProps + } + if sourceProps, ok := v.(map[string]any); ok { + for pk, pv := range sourceProps { + if _, exists := targetProps[pk]; !exists { + targetProps[pk] = deepCopy(pv) + } + } + } + } else if k == "required" { + targetReq, _ := schemaMap["required"].([]any) + if sourceReq, ok := v.([]any); ok { + for _, rv := range sourceReq { + // 简单的去重添加 + exists := false + for _, tr := range targetReq { + if tr == rv { + exists = true + break + } + } + if !exists { + targetReq = append(targetReq, rv) + } + } + schemaMap["required"] = targetReq + } + } else if _, exists := schemaMap[k]; !exists { + schemaMap[k] = deepCopy(v) + } + } + } + } + } + + // 3. [SAFETY] 检查当前对象是否为 JSON Schema 节点 + looksLikeSchema := hasKey(schemaMap, "type") || + hasKey(schemaMap, "properties") || + hasKey(schemaMap, "items") || + hasKey(schemaMap, "enum") || + hasKey(schemaMap, "anyOf") || + hasKey(schemaMap, "oneOf") || + hasKey(schemaMap, "allOf") + + if looksLikeSchema { + // 4. [ROBUST] 约束迁移 + migrateConstraints(schemaMap) + + // 5. [CRITICAL] 白名单过滤 + allowedFields := map[string]bool{ + "type": true, + "description": true, + "properties": true, + "required": true, + "items": true, + "enum": true, + "title": true, + } + for k := range schemaMap { + if !allowedFields[k] { + delete(schemaMap, k) + } + } + + // 6. 
[SAFETY] 处理空 Object + if t, _ := schemaMap["type"].(string); t == "object" { + hasProps := false + if props, ok := schemaMap["properties"].(map[string]any); ok && len(props) > 0 { + hasProps = true + } + if !hasProps { + schemaMap["properties"] = map[string]any{ + "reason": map[string]any{ + "type": "string", + "description": "Reason for calling this tool", + }, + } + schemaMap["required"] = []any{"reason"} + } + } + + // 7. [SAFETY] Required 字段对齐 + if props, ok := schemaMap["properties"].(map[string]any); ok { + if req, ok := schemaMap["required"].([]any); ok { + var validReq []any + for _, r := range req { + if rStr, ok := r.(string); ok { + if _, exists := props[rStr]; exists { + validReq = append(validReq, r) + } + } + } + if len(validReq) > 0 { + schemaMap["required"] = validReq + } else { + delete(schemaMap, "required") + } + } + } + + // 8. 处理 type 字段 (Lowercase + Nullable 提取) + isEffectivelyNullable := false + if typeVal, exists := schemaMap["type"]; exists { + var selectedType string + switch v := typeVal.(type) { + case string: + lower := strings.ToLower(v) + if lower == "null" { + isEffectivelyNullable = true + selectedType = "string" // fallback + } else { + selectedType = lower + } + case []any: + // ["string", "null"] + for _, t := range v { + if ts, ok := t.(string); ok { + lower := strings.ToLower(ts) + if lower == "null" { + isEffectivelyNullable = true + } else if selectedType == "" { + selectedType = lower + } + } + } + if selectedType == "" { + selectedType = "string" + } + } + schemaMap["type"] = selectedType + } else { + // 默认 object 如果有 properties (虽然上面白名单过滤可能删了 type 如果它不在... 但 type 必在 allowlist) + // 如果没有 type,但有 properties,补一个 + if hasKey(schemaMap, "properties") { + schemaMap["type"] = "object" + } else { + // 默认为 string ? or object? Gemini 通常需要明确 type + schemaMap["type"] = "object" + } + } + + if isEffectivelyNullable { + desc, _ := schemaMap["description"].(string) + if !strings.Contains(desc, "nullable") { + if desc != "" { + desc += " " + } + desc += "(nullable)" + schemaMap["description"] = desc + } + } + + // 9. 
Enum 值强制转字符串 + if enumVals, ok := schemaMap["enum"].([]any); ok { + hasNonString := false + for i, val := range enumVals { + if _, isStr := val.(string); !isStr { + hasNonString = true + if val == nil { + enumVals[i] = "null" + } else { + enumVals[i] = fmt.Sprintf("%v", val) + } + } + } + // If we mandated string values, we must ensure type is string + if hasNonString { + schemaMap["type"] = "string" + } + } + } + + return schemaMap +} + +func hasKey(m map[string]any, k string) bool { + _, ok := m[k] + return ok +} + +func migrateConstraints(m map[string]any) { + constraints := []struct { + key string + label string + }{ + {"minLength", "minLen"}, + {"maxLength", "maxLen"}, + {"pattern", "pattern"}, + {"minimum", "min"}, + {"maximum", "max"}, + {"multipleOf", "multipleOf"}, + {"exclusiveMinimum", "exclMin"}, + {"exclusiveMaximum", "exclMax"}, + {"minItems", "minItems"}, + {"maxItems", "maxItems"}, + {"propertyNames", "propertyNames"}, + {"format", "format"}, + } + + var hints []string + for _, c := range constraints { + if val, ok := m[c.key]; ok && val != nil { + hints = append(hints, fmt.Sprintf("%s: %v", c.label, val)) + } + } + + if len(hints) > 0 { + suffix := fmt.Sprintf(" [Constraint: %s]", strings.Join(hints, ", ")) + desc, _ := m["description"].(string) + if !strings.Contains(desc, suffix) { + m["description"] = desc + suffix + } + } +} + +// mergeAllOf 合并 allOf +func mergeAllOf(m map[string]any) { + allOf, ok := m["allOf"].([]any) + if !ok { + return + } + delete(m, "allOf") + + mergedProps := make(map[string]any) + mergedReq := make(map[string]bool) + otherFields := make(map[string]any) + + for _, sub := range allOf { + if subMap, ok := sub.(map[string]any); ok { + // Props + if props, ok := subMap["properties"].(map[string]any); ok { + for k, v := range props { + mergedProps[k] = v + } + } + // Required + if reqs, ok := subMap["required"].([]any); ok { + for _, r := range reqs { + if s, ok := r.(string); ok { + mergedReq[s] = true + } + } + } + // Others + for k, v := range subMap { + if k != "properties" && k != "required" && k != "allOf" { + if _, exists := otherFields[k]; !exists { + otherFields[k] = v + } + } + } + } + } + + // Apply + for k, v := range otherFields { + if _, exists := m[k]; !exists { + m[k] = v + } + } + if len(mergedProps) > 0 { + existProps, _ := m["properties"].(map[string]any) + if existProps == nil { + existProps = make(map[string]any) + m["properties"] = existProps + } + for k, v := range mergedProps { + if _, exists := existProps[k]; !exists { + existProps[k] = v + } + } + } + if len(mergedReq) > 0 { + existReq, _ := m["required"].([]any) + var currentReqs []string + for _, r := range existReq { + if s, ok := r.(string); ok { + currentReqs = append(currentReqs, s) + delete(mergedReq, s) // already exists + } + } + // append new + for r := range mergedReq { + existReq = append(existReq, r) + } + m["required"] = existReq + } +} + +// extractBestSchemaFromUnion 从 anyOf/oneOf 中选取最佳分支 +func extractBestSchemaFromUnion(unionArray []any) any { + var bestOption any + bestScore := -1 + + for _, item := range unionArray { + score := scoreSchemaOption(item) + if score > bestScore { + bestScore = score + bestOption = item + } + } + return bestOption +} + +func scoreSchemaOption(val any) int { + m, ok := val.(map[string]any) + if !ok { + return 0 + } + typeStr, _ := m["type"].(string) + + if hasKey(m, "properties") || typeStr == "object" { + return 3 + } + if hasKey(m, "items") || typeStr == "array" { + return 2 + } + if typeStr != "" && typeStr != "null" { + 
return 1 + } + return 0 +} + +// DeepCleanUndefined 深度清理值为 "[undefined]" 的字段 +func DeepCleanUndefined(value any) { + if value == nil { + return + } + switch v := value.(type) { + case map[string]any: + for k, val := range v { + if s, ok := val.(string); ok && s == "[undefined]" { + delete(v, k) + continue + } + DeepCleanUndefined(val) + } + case []any: + for _, val := range v { + DeepCleanUndefined(val) + } + } +} diff --git a/backend/internal/pkg/antigravity/stream_transformer.go b/backend/internal/pkg/antigravity/stream_transformer.go index da0c6f97..b384658a 100644 --- a/backend/internal/pkg/antigravity/stream_transformer.go +++ b/backend/internal/pkg/antigravity/stream_transformer.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "log" "strings" ) @@ -102,6 +103,14 @@ func (p *StreamingProcessor) ProcessLine(line string) []byte { // 检查是否结束 if len(geminiResp.Candidates) > 0 { finishReason := geminiResp.Candidates[0].FinishReason + if finishReason == "MALFORMED_FUNCTION_CALL" { + log.Printf("[Antigravity] MALFORMED_FUNCTION_CALL detected in stream for model %s", p.originalModel) + if geminiResp.Candidates[0].Content != nil { + if b, err := json.Marshal(geminiResp.Candidates[0].Content); err == nil { + log.Printf("[Antigravity] Malformed content: %s", string(b)) + } + } + } if finishReason != "" { _, _ = result.Write(p.emitFinish(finishReason)) } diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 1a239484..da7ba853 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1320,6 +1320,14 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co return nil, err } + // 清理 Schema + if cleanedBody, err := cleanGeminiRequest(injectedBody); err == nil { + injectedBody = cleanedBody + log.Printf("[Antigravity] Cleaned request schema in forwarded request for account %s", account.Name) + } else { + log.Printf("[Antigravity] Failed to clean schema: %v", err) + } + // 包装请求 wrappedBody, err := s.wrapV1InternalRequest(projectID, mappedModel, injectedBody) if err != nil { @@ -1752,6 +1760,19 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context if u := extractGeminiUsage(parsed); u != nil { usage = u } + // Check for MALFORMED_FUNCTION_CALL + if candidates, ok := parsed["candidates"].([]any); ok && len(candidates) > 0 { + if cand, ok := candidates[0].(map[string]any); ok { + if fr, ok := cand["finishReason"].(string); ok && fr == "MALFORMED_FUNCTION_CALL" { + log.Printf("[Antigravity] MALFORMED_FUNCTION_CALL detected in forward stream") + if content, ok := cand["content"]; ok { + if b, err := json.Marshal(content); err == nil { + log.Printf("[Antigravity] Malformed content: %s", string(b)) + } + } + } + } + } } if firstTokenMs == nil { @@ -1901,6 +1922,20 @@ func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Cont usage = u } + // Check for MALFORMED_FUNCTION_CALL + if candidates, ok := parsed["candidates"].([]any); ok && len(candidates) > 0 { + if cand, ok := candidates[0].(map[string]any); ok { + if fr, ok := cand["finishReason"].(string); ok && fr == "MALFORMED_FUNCTION_CALL" { + log.Printf("[Antigravity] MALFORMED_FUNCTION_CALL detected in forward non-stream collect") + if content, ok := cand["content"]; ok { + if b, err := json.Marshal(content); err == nil { + log.Printf("[Antigravity] Malformed content: %s", string(b)) + } + } + } + } + } + // 保留最后一个有 
parts 的响应 if parts := extractGeminiParts(parsed); len(parts) > 0 { lastWithParts = parsed @@ -2541,3 +2576,55 @@ func isImageGenerationModel(model string) bool { modelLower == "gemini-2.5-flash-image-preview" || strings.HasPrefix(modelLower, "gemini-2.5-flash-image-") } + +// cleanGeminiRequest 清理 Gemini 请求体中的 Schema +func cleanGeminiRequest(body []byte) ([]byte, error) { + var payload map[string]any + if err := json.Unmarshal(body, &payload); err != nil { + return nil, err + } + + modified := false + + // 1. 清理 Tools + if tools, ok := payload["tools"].([]any); ok && len(tools) > 0 { + for _, t := range tools { + toolMap, ok := t.(map[string]any) + if !ok { + continue + } + + // function_declarations (snake_case) or functionDeclarations (camelCase) + var funcs []any + if f, ok := toolMap["functionDeclarations"].([]any); ok { + funcs = f + } else if f, ok := toolMap["function_declarations"].([]any); ok { + funcs = f + } + + if len(funcs) == 0 { + continue + } + + for _, f := range funcs { + funcMap, ok := f.(map[string]any) + if !ok { + continue + } + + if params, ok := funcMap["parameters"].(map[string]any); ok { + antigravity.DeepCleanUndefined(params) + cleaned := antigravity.CleanJSONSchema(params) + funcMap["parameters"] = cleaned + modified = true + } + } + } + } + + if !modified { + return body, nil + } + + return json.Marshal(payload) +} From 498c6cfae9d65098f5eb55bca4d399fb5ddc80f7 Mon Sep 17 00:00:00 2001 From: 0xff26b9a8 <25315788+0xff26b9a8@users.noreply.github.com> Date: Wed, 21 Jan 2026 10:58:39 +0800 Subject: [PATCH 031/214] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20schema=20?= =?UTF-8?q?=E6=B8=85=E7=90=86=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../internal/pkg/antigravity/schema_cleaner.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/backend/internal/pkg/antigravity/schema_cleaner.go b/backend/internal/pkg/antigravity/schema_cleaner.go index 1a06b2c8..0ee746aa 100644 --- a/backend/internal/pkg/antigravity/schema_cleaner.go +++ b/backend/internal/pkg/antigravity/schema_cleaner.go @@ -146,17 +146,10 @@ func cleanJSONSchemaRecursive(value any) any { for _, v := range schemaMap { if _, isMap := v.(map[string]any); isMap { cleanJSONSchemaRecursive(v) - } else if _, isArr := v.([]any); isArr { - // 数组内的对象也要递归 - } - } - // 稍微补全一下数组递归 - for k, v := range schemaMap { - if arr, ok := v.([]any); ok { + } else if arr, isArr := v.([]any); isArr { for _, item := range arr { cleanJSONSchemaRecursive(item) } - schemaMap[k] = arr } } } @@ -455,18 +448,18 @@ func mergeAllOf(m map[string]any) { } if len(mergedReq) > 0 { existReq, _ := m["required"].([]any) - var currentReqs []string + var validReqs []any for _, r := range existReq { if s, ok := r.(string); ok { - currentReqs = append(currentReqs, s) + validReqs = append(validReqs, s) delete(mergedReq, s) // already exists } } // append new for r := range mergedReq { - existReq = append(existReq, r) + validReqs = append(validReqs, r) } - m["required"] = existReq + m["required"] = validReqs } } From 52c745bc62d921d2014caf7aa8428829eabe3d83 Mon Sep 17 00:00:00 2001 From: song Date: Wed, 21 Jan 2026 20:40:45 +0800 Subject: [PATCH 032/214] =?UTF-8?q?feat:=20=E4=B8=BA=20Antigravity=20?= =?UTF-8?q?=E5=B9=B3=E5=8F=B0=E5=90=AF=E7=94=A8=E6=8B=A6=E6=88=AA=E9=A2=84?= =?UTF-8?q?=E7=83=AD=E8=AF=B7=E6=B1=82=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
 frontend/src/components/account/CreateAccountModal.vue | 4 ++--
 frontend/src/components/account/EditAccountModal.vue   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue
index c81de00e..232c5f98 100644
--- a/frontend/src/components/account/CreateAccountModal.vue
+++ b/frontend/src/components/account/CreateAccountModal.vue
@@ -1159,9 +1159,9 @@
 [hunk body not preserved: the Vue template markup was stripped from this patch text; 2 lines removed, 2 added per the diffstat]
diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue
index d27364f1..ebc0fbda 100644
--- a/frontend/src/components/account/EditAccountModal.vue
+++ b/frontend/src/components/account/EditAccountModal.vue
@@ -512,9 +512,9 @@
 [hunk body not preserved: the Vue template markup was stripped from this patch text; 2 lines removed, 2 added per the diffstat]
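Before the next patch, a reading aid for the schema-cleaner work above: the intent of the two exported entry points is easiest to see on a small schema that uses the rejected constructs ($ref/$defs, anyOf, and numeric validation keywords). A minimal sketch, assuming in-module access to the internal antigravity package; the cleaned output described in the comments is abbreviated and not authoritative:

package main

import (
	"encoding/json"
	"fmt"

	// Internal package: importable like this only from inside the sub2api module.
	"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
)

func main() {
	// A tool schema using constructs the Gemini v1internal endpoint rejects.
	raw := []byte(`{
		"type": "object",
		"properties": {
			"path": {"$ref": "#/$defs/Path"},
			"mode": {"anyOf": [{"type": "string", "enum": ["r", "w"]}, {"type": "null"}]},
			"limit": {"type": "integer", "minimum": 1, "maximum": 100}
		},
		"required": ["path"],
		"$defs": {"Path": {"type": "string", "minLength": 1}}
	}`)

	var schema map[string]any
	if err := json.Unmarshal(raw, &schema); err != nil {
		panic(err)
	}

	antigravity.DeepCleanUndefined(schema) // drop "[undefined]" leaves first
	cleaned := antigravity.CleanJSONSchema(schema)

	// Expected shape: the $ref flattened into {"type": "string"}, the non-null
	// anyOf branch kept, and minimum/maximum/minLength migrated into the
	// description as "[Constraint: ...]" hints instead of schema keywords.
	out, _ := json.MarshalIndent(cleaned, "", "  ")
	fmt.Println(string(out))
}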
From 207e09500ad191e435e1e183975db86ac8ce0215 Mon Sep 17 00:00:00 2001
From: song
Date: Wed, 21 Jan 2026 20:48:36 +0800
Subject: [PATCH 033/214] =?UTF-8?q?feat(antigravity):=20=E6=94=AF=E6=8C=81?=
 =?UTF-8?q?=E6=8C=89=E6=A8=A1=E5=9E=8B=E7=B1=BB=E5=9E=8B=E9=85=8D=E7=BD=AE?=
 =?UTF-8?q?=E9=87=8D=E8=AF=95=E6=AC=A1=E6=95=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

New environment variables:

- GATEWAY_ANTIGRAVITY_MAX_RETRIES_CLAUDE
- GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_TEXT
- GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_IMAGE

When unset, they fall back to the platform-level
GATEWAY_ANTIGRAVITY_MAX_RETRIES.
---
 .../service/antigravity_gateway_service.go | 42 ++++++++++++++++---
 1 file changed, 37 insertions(+), 5 deletions(-)

diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go
index da7ba853..85e8eec7 100644
--- a/backend/internal/service/antigravity_gateway_service.go
+++ b/backend/internal/service/antigravity_gateway_service.go
@@ -31,10 +31,13 @@ const (
 )
 
 const (
-	antigravityMaxRetriesEnv      = "GATEWAY_ANTIGRAVITY_MAX_RETRIES"
-	antigravityScopeRateLimitEnv  = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT"
-	antigravityBillingModelEnv    = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL"
-	antigravityFallbackSecondsEnv = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS"
+	antigravityMaxRetriesEnv            = "GATEWAY_ANTIGRAVITY_MAX_RETRIES"
+	antigravityMaxRetriesClaudeEnv      = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_CLAUDE"
+	antigravityMaxRetriesGeminiTextEnv  = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_TEXT"
+	antigravityMaxRetriesGeminiImageEnv = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_IMAGE"
+	antigravityScopeRateLimitEnv        = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT"
+	antigravityBillingModelEnv          = "GATEWAY_ANTIGRAVITY_BILL_WITH_MAPPED_MODEL"
+	antigravityFallbackSecondsEnv       = "GATEWAY_ANTIGRAVITY_FALLBACK_COOLDOWN_SECONDS"
 )
 
 // antigravityRetryLoopParams 重试循环的参数
@@ -51,6 +54,7 @@ type antigravityRetryLoopParams struct {
 	httpUpstream   HTTPUpstream
 	settingService *SettingService
 	handleError    func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope)
+	maxRetries     int // 可选,0 表示使用平台级默认值
 }
 
 // antigravityRetryLoopResult 重试循环的结果
@@ -64,7 +68,10 @@ func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopRe
 	if len(availableURLs) == 0 {
 		availableURLs = antigravity.BaseURLs
 	}
-	maxRetries := antigravityMaxRetries()
+	maxRetries := p.maxRetries
+	if maxRetries <= 0 {
+		maxRetries = antigravityMaxRetries()
+	}
 
 	var resp *http.Response
 	var usedBaseURL string
@@ -770,6 +777,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context,
 		httpUpstream:   s.httpUpstream,
 		settingService: s.settingService,
 		handleError:    s.handleUpstreamError,
+		maxRetries:     antigravityMaxRetriesForModel(originalModel),
 	})
 	if err != nil {
 		return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries")
@@ -846,6 +854,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context,
 		httpUpstream:   s.httpUpstream,
 		settingService: s.settingService,
 		handleError:    s.handleUpstreamError,
+		maxRetries:     antigravityMaxRetriesForModel(originalModel),
 	})
 	if retryErr != nil {
 		appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
@@ -1352,6 +1361,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co
 		httpUpstream:   s.httpUpstream,
 		settingService: s.settingService,
 		handleError:    s.handleUpstreamError,
+		maxRetries:     antigravityMaxRetriesForModel(originalModel),
 	})
 	if err != nil {
 		return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries")
 	}
@@ -1560,6 +1570,28 @@ func antigravityMaxRetries() int {
 	return value
 }
 
+// antigravityMaxRetriesForModel 根据模型类型获取重试次数
+// 优先使用模型细分配置,未设置则回退到平台级配置
+func antigravityMaxRetriesForModel(model string) int {
+	var envKey string
+	if strings.HasPrefix(model, "claude-") {
+		envKey = antigravityMaxRetriesClaudeEnv
+	} else if isImageGenerationModel(model) {
+		envKey = antigravityMaxRetriesGeminiImageEnv
+	} else if strings.HasPrefix(model, "gemini-") {
+		envKey = antigravityMaxRetriesGeminiTextEnv
+	}
+
+	if envKey != "" {
+		if raw := strings.TrimSpace(os.Getenv(envKey)); raw != "" {
+			if value, err := strconv.Atoi(raw); err == nil && value > 0 {
+				return value
+			}
+		}
+	}
+	return antigravityMaxRetries()
+}
+
 func antigravityUseMappedModelForBilling() bool {
 	v := strings.ToLower(strings.TrimSpace(os.Getenv(antigravityBillingModelEnv)))
 	return v == "1" || v == "true" || v == "yes" || v == "on"
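The fall-through order of the new knobs can be pinned down with a package-local test. A sketch under the assumption that it sits next to the service code (t.Setenv requires Go 1.17+):

package service

import "testing"

func TestAntigravityMaxRetriesForModel_FallThrough(t *testing.T) {
	t.Setenv("GATEWAY_ANTIGRAVITY_MAX_RETRIES", "5")
	t.Setenv("GATEWAY_ANTIGRAVITY_MAX_RETRIES_CLAUDE", "2")

	// Claude-backed models pick up the model-type override.
	if got := antigravityMaxRetriesForModel("claude-sonnet-4-5"); got != 2 {
		t.Fatalf("claude override not applied: got %d", got)
	}
	// No GEMINI_TEXT override set: text models use the platform value.
	if got := antigravityMaxRetriesForModel("gemini-2.5-flash"); got != 5 {
		t.Fatalf("platform fallback not applied: got %d", got)
	}
	// Non-positive or unparseable overrides are ignored, not honored.
	t.Setenv("GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_TEXT", "-1")
	if got := antigravityMaxRetriesForModel("gemini-2.5-flash"); got != 5 {
		t.Fatalf("invalid override should fall through: got %d", got)
	}
}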
From 3002c7a17f83b5ca1b174fdf3aea2046fa703649 Mon Sep 17 00:00:00 2001
From: song
Date: Fri, 23 Jan 2026 10:44:21 +0800
Subject: [PATCH 034/214] Clamp Claude maxOutputTokens to 64000

---
 .../pkg/antigravity/request_transformer.go | 20 ++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go
index 1b21bd58..80063cb8 100644
--- a/backend/internal/pkg/antigravity/request_transformer.go
+++ b/backend/internal/pkg/antigravity/request_transformer.go
@@ -490,9 +490,23 @@ func parseToolResultContent(content json.RawMessage, isError bool) string {
 }
 
 // buildGenerationConfig 构建 generationConfig
+const (
+	defaultMaxOutputTokens    = 64000
+	maxOutputTokensUpperBound = 65000
+	maxOutputTokensClaude     = 64000
+)
+
+func maxOutputTokensLimit(model string) int {
+	if strings.HasPrefix(model, "claude-") {
+		return maxOutputTokensClaude
+	}
+	return maxOutputTokensUpperBound
+}
+
 func buildGenerationConfig(req *ClaudeRequest) *GeminiGenerationConfig {
+	maxLimit := maxOutputTokensLimit(req.Model)
 	config := &GeminiGenerationConfig{
-		MaxOutputTokens: 64000, // 默认最大输出
+		MaxOutputTokens: defaultMaxOutputTokens, // 默认最大输出
 		StopSequences:   DefaultStopSequences,
 	}
 
@@ -516,6 +530,10 @@ func buildGenerationConfig(req *ClaudeRequest) *GeminiGenerationConfig {
 		}
 	}
 
+	if config.MaxOutputTokens > maxLimit {
+		config.MaxOutputTokens = maxLimit
+	}
+
 	// 其他参数
 	if req.Temperature != nil {
 		config.Temperature = req.Temperature

From 316f2fee211709db73d476881c13ca6a844ac51f Mon Sep 17 00:00:00 2001
From: song
Date: Fri, 23 Jan 2026 19:39:48 +0800
Subject: [PATCH 035/214] feat(ops): add account switch metrics and trend

---
 .../internal/repository/ops_repo_metrics.go   |  27 ++--
 .../internal/repository/ops_repo_trends.go    |  49 +++++-
 .../internal/service/ops_metrics_collector.go |  27 ++++
 backend/internal/service/ops_port.go          |   2 +
 backend/internal/service/ops_trend_models.go  |   1 +
 ...42_add_ops_system_metrics_switch_count.sql |   3 +
 frontend/src/api/admin/ops.ts                 |   2 +
 frontend/src/i18n/locales/en.ts               |   5 +
 frontend/src/i18n/locales/zh.ts               |   5 +
 frontend/src/views/admin/ops/OpsDashboard.vue |  49 +++++-
 .../ops/components/OpsDashboardSkeleton.vue   |   7 +-
 .../components/OpsSwitchRateTrendChart.vue    | 150 ++++++++++++++++++
 12 files changed, 307 insertions(+), 20 deletions(-)
 create mode 100644 backend/migrations/042_add_ops_system_metrics_switch_count.sql
 create mode 100644 frontend/src/views/admin/ops/components/OpsSwitchRateTrendChart.vue
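For orientation before the diffs: both the trend query and the minute-window collector in this patch count an "account switch" by scanning the upstream_errors JSON array on ops_error_logs for three failover event kinds. The same predicate expressed in Go, with a hypothetical struct mirroring one array element:

package main

import "fmt"

// upstreamEvent mirrors one element of ops_error_logs.upstream_errors,
// trimmed to the single field the SQL below inspects.
type upstreamEvent struct {
	Kind string `json:"kind"`
}

// isAccountSwitch matches the SQL predicate
// ev->>'kind' IN ('failover', 'retry_exhausted_failover', 'failover_on_400').
func isAccountSwitch(ev upstreamEvent) bool {
	switch ev.Kind {
	case "failover", "retry_exhausted_failover", "failover_on_400":
		return true
	}
	return false
}

func main() {
	events := []upstreamEvent{
		{Kind: "retry"},           // not a switch: same account retried
		{Kind: "failover"},        // counted
		{Kind: "failover_on_400"}, // counted
	}
	n := 0
	for _, ev := range events {
		if isAccountSwitch(ev) {
			n++
		}
	}
	fmt.Println(n) // 2
}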
diff --git a/backend/internal/repository/ops_repo_metrics.go b/backend/internal/repository/ops_repo_metrics.go
index 713e0eb9..f1e57c38 100644
--- a/backend/internal/repository/ops_repo_metrics.go
+++ b/backend/internal/repository/ops_repo_metrics.go
@@ -43,6 +43,7 @@ INSERT INTO ops_system_metrics (
 	upstream_529_count,
 
 	token_consumed,
+	account_switch_count,
 
 	qps,
 	tps,
@@ -81,14 +82,14 @@ INSERT INTO ops_system_metrics (
 	$1,$2,$3,$4,
 	$5,$6,$7,$8,
 	$9,$10,$11,
-	$12,$13,$14,
-	$15,$16,$17,$18,$19,$20,
-	$21,$22,$23,$24,$25,$26,
-	$27,$28,$29,$30,
-	$31,$32,
-	$33,$34,
-	$35,$36,$37,
-	$38,$39
+	$12,$13,$14,$15,
+	$16,$17,$18,$19,$20,$21,
+	$22,$23,$24,$25,$26,$27,
+	$28,$29,$30,$31,
+	$32,$33,
+	$34,$35,
+	$36,$37,$38,
+	$39,$40
 )`
 
 	_, err := r.db.ExecContext(
@@ -109,6 +110,7 @@ INSERT INTO ops_system_metrics (
 		input.Upstream529Count,
 
 		input.TokenConsumed,
+		input.AccountSwitchCount,
 
 		opsNullFloat64(input.QPS),
 		opsNullFloat64(input.TPS),
@@ -177,7 +179,8 @@ SELECT
 	db_conn_waiting,
 
 	goroutine_count,
-	concurrency_queue_depth
+	concurrency_queue_depth,
+	account_switch_count
 FROM ops_system_metrics
 WHERE window_minutes = $1
   AND platform IS NULL
@@ -199,6 +202,7 @@ LIMIT 1`
 	var dbWaiting sql.NullInt64
 	var goroutines sql.NullInt64
 	var queueDepth sql.NullInt64
+	var accountSwitchCount sql.NullInt64
 
 	if err := r.db.QueryRowContext(ctx, q, windowMinutes).Scan(
 		&out.ID,
@@ -217,6 +221,7 @@ LIMIT 1`
 		&dbWaiting,
 		&goroutines,
 		&queueDepth,
+		&accountSwitchCount,
 	); err != nil {
 		return nil, err
 	}
@@ -273,6 +278,10 @@ LIMIT 1`
 		v := int(queueDepth.Int64)
 		out.ConcurrencyQueueDepth = &v
 	}
+	if accountSwitchCount.Valid {
+		v := accountSwitchCount.Int64
+		out.AccountSwitchCount = &v
+	}
 
 	return &out, nil
 }

diff --git a/backend/internal/repository/ops_repo_trends.go b/backend/internal/repository/ops_repo_trends.go
index 022d1187..3be490dd 100644
--- a/backend/internal/repository/ops_repo_trends.go
+++ b/backend/internal/repository/ops_repo_trends.go
@@ -56,18 +56,44 @@ error_buckets AS (
     AND COALESCE(status_code, 0) >= 400
   GROUP BY 1
 ),
+switch_buckets AS (
+  SELECT ` + errorBucketExpr + ` AS bucket,
+         COALESCE(SUM(CASE
+           WHEN ev->>'kind' IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1
+           ELSE 0
+         END), 0) AS switch_count
+  FROM ops_error_logs
+  CROSS JOIN LATERAL jsonb_array_elements(
+    COALESCE(NULLIF(upstream_errors, 'null'::jsonb), '[]'::jsonb)
+  ) AS ev
+  ` + errorWhere + `
+    AND upstream_errors IS NOT NULL
+  GROUP BY 1
+),
 combined AS (
-  SELECT COALESCE(u.bucket, e.bucket) AS bucket,
-         COALESCE(u.success_count, 0) AS success_count,
-         COALESCE(e.error_count, 0) AS error_count,
-         COALESCE(u.token_consumed, 0) AS token_consumed
-  FROM usage_buckets u
-  FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket
+  SELECT
+    bucket,
+    SUM(success_count) AS success_count,
+    SUM(error_count) AS error_count,
+    SUM(token_consumed) AS token_consumed,
+    SUM(switch_count) AS switch_count
+  FROM (
+    SELECT bucket, success_count, 0 AS error_count, token_consumed, 0 AS switch_count
+    FROM usage_buckets
+    UNION ALL
+    SELECT bucket, 0, error_count, 0, 0
+    FROM error_buckets
+    UNION ALL
+    SELECT bucket, 0, 0, 0, switch_count
+    FROM switch_buckets
+  ) t
+  GROUP BY bucket
 )
 SELECT
   bucket,
   (success_count + error_count) AS request_count,
-  token_consumed
+  token_consumed,
+  switch_count
 FROM combined
 ORDER BY bucket ASC`
@@ -84,13 +110,18 @@ ORDER BY bucket ASC`
 		var bucket time.Time
 		var requests int64
 		var tokens sql.NullInt64
-		if err := 
rows.Scan(&bucket, &requests, &tokens); err != nil { + var switches sql.NullInt64 + if err := rows.Scan(&bucket, &requests, &tokens, &switches); err != nil { return nil, err } tokenConsumed := int64(0) if tokens.Valid { tokenConsumed = tokens.Int64 } + switchCount := int64(0) + if switches.Valid { + switchCount = switches.Int64 + } denom := float64(bucketSeconds) if denom <= 0 { @@ -103,6 +134,7 @@ ORDER BY bucket ASC` BucketStart: bucket.UTC(), RequestCount: requests, TokenConsumed: tokenConsumed, + SwitchCount: switchCount, QPS: qps, TPS: tps, }) @@ -385,6 +417,7 @@ func fillOpsThroughputBuckets(start, end time.Time, bucketSeconds int, points [] BucketStart: cursor, RequestCount: 0, TokenConsumed: 0, + SwitchCount: 0, QPS: 0, TPS: 0, }) diff --git a/backend/internal/service/ops_metrics_collector.go b/backend/internal/service/ops_metrics_collector.go index edf32cf2..73ad1fb0 100644 --- a/backend/internal/service/ops_metrics_collector.go +++ b/backend/internal/service/ops_metrics_collector.go @@ -285,6 +285,11 @@ func (c *OpsMetricsCollector) collectAndPersist(ctx context.Context) error { return fmt.Errorf("query error counts: %w", err) } + accountSwitchCount, err := c.queryAccountSwitchCount(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query account switch counts: %w", err) + } + windowSeconds := windowEnd.Sub(windowStart).Seconds() if windowSeconds <= 0 { windowSeconds = 60 @@ -310,6 +315,7 @@ func (c *OpsMetricsCollector) collectAndPersist(ctx context.Context) error { Upstream529Count: upstream529, TokenConsumed: tokenConsumed, + AccountSwitchCount: accountSwitchCount, QPS: float64Ptr(roundTo1DP(qps)), TPS: float64Ptr(roundTo1DP(tps)), @@ -551,6 +557,27 @@ WHERE created_at >= $1 AND created_at < $2` return errorTotal, businessLimited, errorSLA, upstreamExcl429529, upstream429, upstream529, nil } +func (c *OpsMetricsCollector) queryAccountSwitchCount(ctx context.Context, start, end time.Time) (int64, error) { + q := ` +SELECT + COALESCE(SUM(CASE + WHEN ev->>'kind' IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1 + ELSE 0 + END), 0) AS switch_count +FROM ops_error_logs o +CROSS JOIN LATERAL jsonb_array_elements( + COALESCE(NULLIF(o.upstream_errors, 'null'::jsonb), '[]'::jsonb) +) AS ev +WHERE o.created_at >= $1 AND o.created_at < $2 + AND o.is_count_tokens = FALSE` + + var count int64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&count); err != nil { + return 0, err + } + return count, nil +} + type opsCollectedSystemStats struct { cpuUsagePercent *float64 memoryUsedMB *int64 diff --git a/backend/internal/service/ops_port.go b/backend/internal/service/ops_port.go index 515b47bb..1de9c8e9 100644 --- a/backend/internal/service/ops_port.go +++ b/backend/internal/service/ops_port.go @@ -162,6 +162,7 @@ type OpsInsertSystemMetricsInput struct { Upstream529Count int64 TokenConsumed int64 + AccountSwitchCount int64 QPS *float64 TPS *float64 @@ -225,6 +226,7 @@ type OpsSystemMetricsSnapshot struct { GoroutineCount *int `json:"goroutine_count"` ConcurrencyQueueDepth *int `json:"concurrency_queue_depth"` + AccountSwitchCount *int64 `json:"account_switch_count"` } type OpsUpsertJobHeartbeatInput struct { diff --git a/backend/internal/service/ops_trend_models.go b/backend/internal/service/ops_trend_models.go index f6d07c14..97bbfebe 100644 --- a/backend/internal/service/ops_trend_models.go +++ b/backend/internal/service/ops_trend_models.go @@ -6,6 +6,7 @@ type OpsThroughputTrendPoint struct { BucketStart time.Time `json:"bucket_start"` 
RequestCount int64 `json:"request_count"` TokenConsumed int64 `json:"token_consumed"` + SwitchCount int64 `json:"switch_count"` QPS float64 `json:"qps"` TPS float64 `json:"tps"` } diff --git a/backend/migrations/042_add_ops_system_metrics_switch_count.sql b/backend/migrations/042_add_ops_system_metrics_switch_count.sql new file mode 100644 index 00000000..6d9f48e5 --- /dev/null +++ b/backend/migrations/042_add_ops_system_metrics_switch_count.sql @@ -0,0 +1,3 @@ +-- ops_system_metrics 增加账号切换次数统计(按分钟窗口) +ALTER TABLE ops_system_metrics + ADD COLUMN IF NOT EXISTS account_switch_count BIGINT NOT NULL DEFAULT 0; diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 6e048436..4214450f 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -136,6 +136,7 @@ export interface OpsThroughputTrendPoint { bucket_start: string request_count: number token_consumed: number + switch_count?: number qps: number tps: number } @@ -284,6 +285,7 @@ export interface OpsSystemMetricsSnapshot { goroutine_count?: number | null concurrency_queue_depth?: number | null + account_switch_count?: number | null } export interface OpsJobHeartbeat { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 18e7d7d3..abbd4ff6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1955,6 +1955,7 @@ export default { waiting: 'waiting', conns: 'conns', queue: 'queue', + accountSwitches: 'Account switches', ok: 'ok', lastRun: 'last_run:', lastSuccess: 'last_success:', @@ -2003,6 +2004,7 @@ export default { failedToLoadData: 'Failed to load ops data.', failedToLoadOverview: 'Failed to load overview', failedToLoadThroughputTrend: 'Failed to load throughput trend', + failedToLoadSwitchTrend: 'Failed to load avg account switches trend', failedToLoadLatencyHistogram: 'Failed to load request duration histogram', failedToLoadErrorTrend: 'Failed to load error trend', failedToLoadErrorDistribution: 'Failed to load error distribution', @@ -2011,9 +2013,11 @@ export default { tpsK: 'TPS (K)', top: 'Top:', throughputTrend: 'Throughput Trend', + switchRateTrend: 'Avg Account Switches', latencyHistogram: 'Request Duration Histogram', errorTrend: 'Error Trend', errorDistribution: 'Error Distribution', + switchRate: 'Avg switches', // Health Score & Diagnosis health: 'Health', healthCondition: 'Health Condition', @@ -2633,6 +2637,7 @@ export default { tooltips: { totalRequests: 'Total number of requests (including both successful and failed requests) in the selected time window.', throughputTrend: 'Requests/QPS + Tokens/TPS in the selected window.', + switchRateTrend: 'Trend of account switches / total requests over the last 5 hours (avg switches).', latencyHistogram: 'Request duration distribution (ms) for successful requests.', errorTrend: 'Error counts over time (SLA scope excludes business limits; upstream excludes 429/529).', errorDistribution: 'Error distribution by status code.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index cb3a4c4c..1b398e7a 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -2103,6 +2103,7 @@ export default { waiting: '等待', conns: '连接', queue: '队列', + accountSwitches: '账号切换', ok: '正常', lastRun: '最近运行', lastSuccess: '最近成功', @@ -2152,6 +2153,7 @@ export default { failedToLoadData: '加载运维数据失败', failedToLoadOverview: '加载概览数据失败', failedToLoadThroughputTrend: '加载吞吐趋势失败', + failedToLoadSwitchTrend: '加载平均账号切换趋势失败', failedToLoadLatencyHistogram: 
'加载请求时长分布失败', failedToLoadErrorTrend: '加载错误趋势失败', failedToLoadErrorDistribution: '加载错误分布失败', @@ -2160,9 +2162,11 @@ export default { tpsK: 'TPS(千)', top: '最高:', throughputTrend: '吞吐趋势', + switchRateTrend: '平均账号切换趋势', latencyHistogram: '请求时长分布', errorTrend: '错误趋势', errorDistribution: '错误分布', + switchRate: '平均账号切换', // Health Score & Diagnosis health: '健康', healthCondition: '健康状况', @@ -2787,6 +2791,7 @@ export default { tooltips: { totalRequests: '当前时间窗口内的总请求数和Token消耗量。', throughputTrend: '当前窗口内的请求/QPS 与 token/TPS 趋势。', + switchRateTrend: '近5小时内账号切换次数 / 请求总数的趋势(平均切换次数)。', latencyHistogram: '成功请求的请求时长分布(毫秒)。', errorTrend: '错误趋势(SLA 口径排除业务限制;上游错误率排除 429/529)。', errorDistribution: '按状态码统计的错误分布。', diff --git a/frontend/src/views/admin/ops/OpsDashboard.vue b/frontend/src/views/admin/ops/OpsDashboard.vue index 72cb2607..927fee94 100644 --- a/frontend/src/views/admin/ops/OpsDashboard.vue +++ b/frontend/src/views/admin/ops/OpsDashboard.vue @@ -40,10 +40,18 @@ /> -
+ <!-- [OpsDashboard.vue template hunk: adds an OpsSwitchRateTrendChart card alongside the existing trend charts; tag markup not recovered] -->
(null) const queryMode = ref('auto') const customStartTime = ref(null) const customEndTime = ref(null) +const switchTrendWindowHours = 5 +const switchTrendTimeRange = `${switchTrendWindowHours}h` +const switchTrendWindowMs = switchTrendWindowHours * 60 * 60 * 1000 const QUERY_KEYS = { timeRange: 'tr', @@ -322,6 +334,9 @@ const metricThresholds = ref(null) const throughputTrend = ref(null) const loadingTrend = ref(false) +const switchTrend = ref(null) +const loadingSwitchTrend = ref(false) + const latencyHistogram = ref(null) const loadingLatency = ref(false) @@ -491,6 +506,19 @@ function buildApiParams() { return params } +function buildSwitchTrendParams() { + const params: any = { + platform: platform.value || undefined, + group_id: groupId.value ?? undefined, + mode: queryMode.value + } + const endTime = new Date() + const startTime = new Date(endTime.getTime() - switchTrendWindowMs) + params.start_time = startTime.toISOString() + params.end_time = endTime.toISOString() + return params +} + async function refreshOverviewWithCancel(fetchSeq: number, signal: AbortSignal) { if (!opsEnabled.value) return try { @@ -504,6 +532,24 @@ async function refreshOverviewWithCancel(fetchSeq: number, signal: AbortSignal) } } +async function refreshSwitchTrendWithCancel(fetchSeq: number, signal: AbortSignal) { + if (!opsEnabled.value) return + loadingSwitchTrend.value = true + try { + const data = await opsAPI.getThroughputTrend(buildSwitchTrendParams(), { signal }) + if (fetchSeq !== dashboardFetchSeq) return + switchTrend.value = data + } catch (err: any) { + if (fetchSeq !== dashboardFetchSeq || isCanceledRequest(err)) return + switchTrend.value = null + appStore.showError(err?.message || t('admin.ops.failedToLoadSwitchTrend')) + } finally { + if (fetchSeq === dashboardFetchSeq) { + loadingSwitchTrend.value = false + } + } +} + async function refreshThroughputTrendWithCancel(fetchSeq: number, signal: AbortSignal) { if (!opsEnabled.value) return loadingTrend.value = true @@ -600,6 +646,7 @@ async function fetchData() { await Promise.all([ refreshOverviewWithCancel(fetchSeq, dashboardFetchController.signal), refreshThroughputTrendWithCancel(fetchSeq, dashboardFetchController.signal), + refreshSwitchTrendWithCancel(fetchSeq, dashboardFetchController.signal), refreshLatencyHistogramWithCancel(fetchSeq, dashboardFetchController.signal), refreshErrorTrendWithCancel(fetchSeq, dashboardFetchController.signal), refreshErrorDistributionWithCancel(fetchSeq, dashboardFetchController.signal) diff --git a/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue b/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue index cffdd8a1..6df1e888 100644 --- a/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue +++ b/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue @@ -50,7 +50,11 @@ const props = withDefaults(defineProps(), {
-
+ <!-- [OpsDashboardSkeleton.vue hunk: adds a skeleton card for the new switch-rate chart; tag markup not recovered] -->
@@ -96,4 +100,3 @@ const props = withDefaults(defineProps(), {
- diff --git a/frontend/src/views/admin/ops/components/OpsSwitchRateTrendChart.vue b/frontend/src/views/admin/ops/components/OpsSwitchRateTrendChart.vue new file mode 100644 index 00000000..391ab8e1 --- /dev/null +++ b/frontend/src/views/admin/ops/components/OpsSwitchRateTrendChart.vue @@ -0,0 +1,150 @@ + + + From fd0370c07a725db93ca748088708df00ff491e12 Mon Sep 17 00:00:00 2001 From: song Date: Fri, 23 Jan 2026 22:24:46 +0800 Subject: [PATCH 036/214] Add invalid-request fallback routing --- backend/ent/group.go | 16 +- backend/ent/group/group.go | 8 + backend/ent/group/where.go | 55 +++ backend/ent/group_create.go | 98 +++++ backend/ent/group_update.go | 72 ++++ backend/ent/migrate/schema.go | 1 + backend/ent/mutation.go | 219 ++++++++--- backend/ent/runtime/runtime.go | 2 +- backend/ent/schema/group.go | 4 + .../internal/handler/admin/group_handler.go | 90 +++-- backend/internal/handler/dto/mappers.go | 43 +- backend/internal/handler/dto/types.go | 2 + backend/internal/handler/gateway_handler.go | 302 ++++++++------ backend/internal/repository/api_key_repo.go | 46 +-- backend/internal/repository/group_repo.go | 7 + backend/internal/service/admin_service.go | 92 ++++- .../service/admin_service_group_test.go | 371 ++++++++++++++++++ .../service/antigravity_gateway_service.go | 82 ++++ .../antigravity_gateway_service_test.go | 80 ++++ .../internal/service/api_key_auth_cache.go | 29 +- .../service/api_key_auth_cache_impl.go | 68 ++-- backend/internal/service/gateway_service.go | 74 +++- backend/internal/service/group.go | 2 + ...043_add_group_invalid_request_fallback.sql | 13 + frontend/src/i18n/locales/en.ts | 5 + frontend/src/i18n/locales/zh.ts | 5 + frontend/src/types/index.ts | 3 + frontend/src/views/admin/GroupsView.vue | 84 ++++ 28 files changed, 1532 insertions(+), 341 deletions(-) create mode 100644 backend/migrations/043_add_group_invalid_request_fallback.sql diff --git a/backend/ent/group.go b/backend/ent/group.go index 0d0c0538..f91a4079 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -56,6 +56,8 @@ type Group struct { ClaudeCodeOnly bool `json:"claude_code_only,omitempty"` // 非 Claude Code 请求降级使用的分组 ID FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` + // 无效请求兜底使用的分组 ID + FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request,omitempty"` // 模型路由配置:模型模式 -> 优先账号ID列表 ModelRouting map[string][]int64 `json:"model_routing,omitempty"` // 是否启用模型路由配置 @@ -172,7 +174,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k: values[i] = new(sql.NullFloat64) - case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID: + case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest: values[i] = new(sql.NullInt64) case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType: values[i] = new(sql.NullString) @@ -322,6 +324,13 @@ func (_m *Group) assignValues(columns []string, values []any) error { _m.FallbackGroupID = new(int64) *_m.FallbackGroupID = value.Int64 } + case group.FieldFallbackGroupIDOnInvalidRequest: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field fallback_group_id_on_invalid_request", values[i]) + } else if value.Valid { 
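// Nullable column: the pointer is only allocated when the scanned value is
// non-NULL, mirroring the FallbackGroupID case above.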
+ _m.FallbackGroupIDOnInvalidRequest = new(int64) + *_m.FallbackGroupIDOnInvalidRequest = value.Int64 + } case group.FieldModelRouting: if value, ok := values[i].(*[]byte); !ok { return fmt.Errorf("unexpected type %T for field model_routing", values[i]) @@ -487,6 +496,11 @@ func (_m *Group) String() string { builder.WriteString(fmt.Sprintf("%v", *v)) } builder.WriteString(", ") + if v := _m.FallbackGroupIDOnInvalidRequest; v != nil { + builder.WriteString("fallback_group_id_on_invalid_request=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") builder.WriteString("model_routing=") builder.WriteString(fmt.Sprintf("%v", _m.ModelRouting)) builder.WriteString(", ") diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index d66d3edc..b63827d3 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -53,6 +53,8 @@ const ( FieldClaudeCodeOnly = "claude_code_only" // FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database. FieldFallbackGroupID = "fallback_group_id" + // FieldFallbackGroupIDOnInvalidRequest holds the string denoting the fallback_group_id_on_invalid_request field in the database. + FieldFallbackGroupIDOnInvalidRequest = "fallback_group_id_on_invalid_request" // FieldModelRouting holds the string denoting the model_routing field in the database. FieldModelRouting = "model_routing" // FieldModelRoutingEnabled holds the string denoting the model_routing_enabled field in the database. @@ -151,6 +153,7 @@ var Columns = []string{ FieldImagePrice4k, FieldClaudeCodeOnly, FieldFallbackGroupID, + FieldFallbackGroupIDOnInvalidRequest, FieldModelRouting, FieldModelRoutingEnabled, } @@ -317,6 +320,11 @@ func ByFallbackGroupID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldFallbackGroupID, opts...).ToFunc() } +// ByFallbackGroupIDOnInvalidRequest orders the results by the fallback_group_id_on_invalid_request field. +func ByFallbackGroupIDOnInvalidRequest(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFallbackGroupIDOnInvalidRequest, opts...).ToFunc() +} + // ByModelRoutingEnabled orders the results by the model_routing_enabled field. func ByModelRoutingEnabled(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldModelRoutingEnabled, opts...).ToFunc() diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index 6ce9e4c6..02cbb3d5 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -150,6 +150,11 @@ func FallbackGroupID(v int64) predicate.Group { return predicate.Group(sql.FieldEQ(FieldFallbackGroupID, v)) } +// FallbackGroupIDOnInvalidRequest applies equality check predicate on the "fallback_group_id_on_invalid_request" field. It's identical to FallbackGroupIDOnInvalidRequestEQ. +func FallbackGroupIDOnInvalidRequest(v int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldFallbackGroupIDOnInvalidRequest, v)) +} + // ModelRoutingEnabled applies equality check predicate on the "model_routing_enabled" field. It's identical to ModelRoutingEnabledEQ. func ModelRoutingEnabled(v bool) predicate.Group { return predicate.Group(sql.FieldEQ(FieldModelRoutingEnabled, v)) @@ -1070,6 +1075,56 @@ func FallbackGroupIDNotNil() predicate.Group { return predicate.Group(sql.FieldNotNull(FieldFallbackGroupID)) } +// FallbackGroupIDOnInvalidRequestEQ applies the EQ predicate on the "fallback_group_id_on_invalid_request" field. 
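// Illustrative use only (not part of this patch; assumes an *ent.Client named
// client is in scope):
//
//	groups, err := client.Group.Query().
//		Where(group.FallbackGroupIDOnInvalidRequestEQ(42)).
//		All(ctx)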
+func FallbackGroupIDOnInvalidRequestEQ(v int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestNEQ applies the NEQ predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestNEQ(v int64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestIn applies the In predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestIn(vs ...int64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldFallbackGroupIDOnInvalidRequest, vs...)) +} + +// FallbackGroupIDOnInvalidRequestNotIn applies the NotIn predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestNotIn(vs ...int64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldFallbackGroupIDOnInvalidRequest, vs...)) +} + +// FallbackGroupIDOnInvalidRequestGT applies the GT predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestGT(v int64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestGTE applies the GTE predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestGTE(v int64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestLT applies the LT predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestLT(v int64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestLTE applies the LTE predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestLTE(v int64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldFallbackGroupIDOnInvalidRequest, v)) +} + +// FallbackGroupIDOnInvalidRequestIsNil applies the IsNil predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldFallbackGroupIDOnInvalidRequest)) +} + +// FallbackGroupIDOnInvalidRequestNotNil applies the NotNil predicate on the "fallback_group_id_on_invalid_request" field. +func FallbackGroupIDOnInvalidRequestNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldFallbackGroupIDOnInvalidRequest)) +} + // ModelRoutingIsNil applies the IsNil predicate on the "model_routing" field. func ModelRoutingIsNil() predicate.Group { return predicate.Group(sql.FieldIsNull(FieldModelRouting)) diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index 0f251e0b..b08894da 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -286,6 +286,20 @@ func (_c *GroupCreate) SetNillableFallbackGroupID(v *int64) *GroupCreate { return _c } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (_c *GroupCreate) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupCreate { + _c.mutation.SetFallbackGroupIDOnInvalidRequest(v) + return _c +} + +// SetNillableFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field if the given value is not nil. 
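// Minimal sketch (assumes a hypothetical fallbackID *int64 taken from an
// admin request; passing nil simply leaves the column NULL):
//
//	g, err := client.Group.Create().
//		SetName("anthropic-default").
//		SetNillableFallbackGroupIDOnInvalidRequest(fallbackID).
//		Save(ctx)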
+func (_c *GroupCreate) SetNillableFallbackGroupIDOnInvalidRequest(v *int64) *GroupCreate { + if v != nil { + _c.SetFallbackGroupIDOnInvalidRequest(*v) + } + return _c +} + // SetModelRouting sets the "model_routing" field. func (_c *GroupCreate) SetModelRouting(v map[string][]int64) *GroupCreate { _c.mutation.SetModelRouting(v) @@ -640,6 +654,10 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _spec.SetField(group.FieldFallbackGroupID, field.TypeInt64, value) _node.FallbackGroupID = &value } + if value, ok := _c.mutation.FallbackGroupIDOnInvalidRequest(); ok { + _spec.SetField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64, value) + _node.FallbackGroupIDOnInvalidRequest = &value + } if value, ok := _c.mutation.ModelRouting(); ok { _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) _node.ModelRouting = value @@ -1128,6 +1146,30 @@ func (u *GroupUpsert) ClearFallbackGroupID() *GroupUpsert { return u } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsert) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsert { + u.Set(group.FieldFallbackGroupIDOnInvalidRequest, v) + return u +} + +// UpdateFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field to the value that was provided on create. +func (u *GroupUpsert) UpdateFallbackGroupIDOnInvalidRequest() *GroupUpsert { + u.SetExcluded(group.FieldFallbackGroupIDOnInvalidRequest) + return u +} + +// AddFallbackGroupIDOnInvalidRequest adds v to the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsert) AddFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsert { + u.Add(group.FieldFallbackGroupIDOnInvalidRequest, v) + return u +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsert) ClearFallbackGroupIDOnInvalidRequest() *GroupUpsert { + u.SetNull(group.FieldFallbackGroupIDOnInvalidRequest) + return u +} + // SetModelRouting sets the "model_routing" field. func (u *GroupUpsert) SetModelRouting(v map[string][]int64) *GroupUpsert { u.Set(group.FieldModelRouting, v) @@ -1581,6 +1623,34 @@ func (u *GroupUpsertOne) ClearFallbackGroupID() *GroupUpsertOne { }) } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertOne) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetFallbackGroupIDOnInvalidRequest(v) + }) +} + +// AddFallbackGroupIDOnInvalidRequest adds v to the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertOne) AddFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddFallbackGroupIDOnInvalidRequest(v) + }) +} + +// UpdateFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateFallbackGroupIDOnInvalidRequest() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateFallbackGroupIDOnInvalidRequest() + }) +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertOne) ClearFallbackGroupIDOnInvalidRequest() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearFallbackGroupIDOnInvalidRequest() + }) +} + // SetModelRouting sets the "model_routing" field. 
func (u *GroupUpsertOne) SetModelRouting(v map[string][]int64) *GroupUpsertOne { return u.Update(func(s *GroupUpsert) { @@ -2205,6 +2275,34 @@ func (u *GroupUpsertBulk) ClearFallbackGroupID() *GroupUpsertBulk { }) } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertBulk) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetFallbackGroupIDOnInvalidRequest(v) + }) +} + +// AddFallbackGroupIDOnInvalidRequest adds v to the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertBulk) AddFallbackGroupIDOnInvalidRequest(v int64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddFallbackGroupIDOnInvalidRequest(v) + }) +} + +// UpdateFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateFallbackGroupIDOnInvalidRequest() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateFallbackGroupIDOnInvalidRequest() + }) +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (u *GroupUpsertBulk) ClearFallbackGroupIDOnInvalidRequest() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearFallbackGroupIDOnInvalidRequest() + }) +} + // SetModelRouting sets the "model_routing" field. func (u *GroupUpsertBulk) SetModelRouting(v map[string][]int64) *GroupUpsertBulk { return u.Update(func(s *GroupUpsert) { diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index c3cc2708..ce8f3748 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -395,6 +395,33 @@ func (_u *GroupUpdate) ClearFallbackGroupID() *GroupUpdate { return _u } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdate) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupUpdate { + _u.mutation.ResetFallbackGroupIDOnInvalidRequest() + _u.mutation.SetFallbackGroupIDOnInvalidRequest(v) + return _u +} + +// SetNillableFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableFallbackGroupIDOnInvalidRequest(v *int64) *GroupUpdate { + if v != nil { + _u.SetFallbackGroupIDOnInvalidRequest(*v) + } + return _u +} + +// AddFallbackGroupIDOnInvalidRequest adds value to the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdate) AddFallbackGroupIDOnInvalidRequest(v int64) *GroupUpdate { + _u.mutation.AddFallbackGroupIDOnInvalidRequest(v) + return _u +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdate) ClearFallbackGroupIDOnInvalidRequest() *GroupUpdate { + _u.mutation.ClearFallbackGroupIDOnInvalidRequest() + return _u +} + // SetModelRouting sets the "model_routing" field. 
func (_u *GroupUpdate) SetModelRouting(v map[string][]int64) *GroupUpdate { _u.mutation.SetModelRouting(v) @@ -829,6 +856,15 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { if _u.mutation.FallbackGroupIDCleared() { _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) } + if value, ok := _u.mutation.FallbackGroupIDOnInvalidRequest(); ok { + _spec.SetField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedFallbackGroupIDOnInvalidRequest(); ok { + _spec.AddField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64, value) + } + if _u.mutation.FallbackGroupIDOnInvalidRequestCleared() { + _spec.ClearField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64) + } if value, ok := _u.mutation.ModelRouting(); ok { _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) } @@ -1513,6 +1549,33 @@ func (_u *GroupUpdateOne) ClearFallbackGroupID() *GroupUpdateOne { return _u } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdateOne) SetFallbackGroupIDOnInvalidRequest(v int64) *GroupUpdateOne { + _u.mutation.ResetFallbackGroupIDOnInvalidRequest() + _u.mutation.SetFallbackGroupIDOnInvalidRequest(v) + return _u +} + +// SetNillableFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableFallbackGroupIDOnInvalidRequest(v *int64) *GroupUpdateOne { + if v != nil { + _u.SetFallbackGroupIDOnInvalidRequest(*v) + } + return _u +} + +// AddFallbackGroupIDOnInvalidRequest adds value to the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdateOne) AddFallbackGroupIDOnInvalidRequest(v int64) *GroupUpdateOne { + _u.mutation.AddFallbackGroupIDOnInvalidRequest(v) + return _u +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (_u *GroupUpdateOne) ClearFallbackGroupIDOnInvalidRequest() *GroupUpdateOne { + _u.mutation.ClearFallbackGroupIDOnInvalidRequest() + return _u +} + // SetModelRouting sets the "model_routing" field. 
func (_u *GroupUpdateOne) SetModelRouting(v map[string][]int64) *GroupUpdateOne { _u.mutation.SetModelRouting(v) @@ -1977,6 +2040,15 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) if _u.mutation.FallbackGroupIDCleared() { _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) } + if value, ok := _u.mutation.FallbackGroupIDOnInvalidRequest(); ok { + _spec.SetField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedFallbackGroupIDOnInvalidRequest(); ok { + _spec.AddField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64, value) + } + if _u.mutation.FallbackGroupIDOnInvalidRequestCleared() { + _spec.ClearField(group.FieldFallbackGroupIDOnInvalidRequest, field.TypeInt64) + } if value, ok := _u.mutation.ModelRouting(); ok { _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) } diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index b377804f..5624c05b 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -226,6 +226,7 @@ var ( {Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, {Name: "claude_code_only", Type: field.TypeBool, Default: false}, {Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true}, {Name: "model_routing", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, {Name: "model_routing_enabled", Type: field.TypeBool, Default: false}, } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index cd2fe8e0..69801b9f 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -3833,61 +3833,63 @@ func (m *AccountGroupMutation) ResetEdge(name string) error { // GroupMutation represents an operation that mutates the Group nodes in the graph. 
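// Optional numeric fields are tracked twice in the struct below: a set-value
// pointer plus an add-delta pointer (fallback_group_id_on_invalid_request and
// addfallback_group_id_on_invalid_request for the new field).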
type GroupMutation struct { config - op Op - typ string - id *int64 - created_at *time.Time - updated_at *time.Time - deleted_at *time.Time - name *string - description *string - rate_multiplier *float64 - addrate_multiplier *float64 - is_exclusive *bool - status *string - platform *string - subscription_type *string - daily_limit_usd *float64 - adddaily_limit_usd *float64 - weekly_limit_usd *float64 - addweekly_limit_usd *float64 - monthly_limit_usd *float64 - addmonthly_limit_usd *float64 - default_validity_days *int - adddefault_validity_days *int - image_price_1k *float64 - addimage_price_1k *float64 - image_price_2k *float64 - addimage_price_2k *float64 - image_price_4k *float64 - addimage_price_4k *float64 - claude_code_only *bool - fallback_group_id *int64 - addfallback_group_id *int64 - model_routing *map[string][]int64 - model_routing_enabled *bool - clearedFields map[string]struct{} - api_keys map[int64]struct{} - removedapi_keys map[int64]struct{} - clearedapi_keys bool - redeem_codes map[int64]struct{} - removedredeem_codes map[int64]struct{} - clearedredeem_codes bool - subscriptions map[int64]struct{} - removedsubscriptions map[int64]struct{} - clearedsubscriptions bool - usage_logs map[int64]struct{} - removedusage_logs map[int64]struct{} - clearedusage_logs bool - accounts map[int64]struct{} - removedaccounts map[int64]struct{} - clearedaccounts bool - allowed_users map[int64]struct{} - removedallowed_users map[int64]struct{} - clearedallowed_users bool - done bool - oldValue func(context.Context) (*Group, error) - predicates []predicate.Group + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + description *string + rate_multiplier *float64 + addrate_multiplier *float64 + is_exclusive *bool + status *string + platform *string + subscription_type *string + daily_limit_usd *float64 + adddaily_limit_usd *float64 + weekly_limit_usd *float64 + addweekly_limit_usd *float64 + monthly_limit_usd *float64 + addmonthly_limit_usd *float64 + default_validity_days *int + adddefault_validity_days *int + image_price_1k *float64 + addimage_price_1k *float64 + image_price_2k *float64 + addimage_price_2k *float64 + image_price_4k *float64 + addimage_price_4k *float64 + claude_code_only *bool + fallback_group_id *int64 + addfallback_group_id *int64 + fallback_group_id_on_invalid_request *int64 + addfallback_group_id_on_invalid_request *int64 + model_routing *map[string][]int64 + model_routing_enabled *bool + clearedFields map[string]struct{} + api_keys map[int64]struct{} + removedapi_keys map[int64]struct{} + clearedapi_keys bool + redeem_codes map[int64]struct{} + removedredeem_codes map[int64]struct{} + clearedredeem_codes bool + subscriptions map[int64]struct{} + removedsubscriptions map[int64]struct{} + clearedsubscriptions bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + accounts map[int64]struct{} + removedaccounts map[int64]struct{} + clearedaccounts bool + allowed_users map[int64]struct{} + removedallowed_users map[int64]struct{} + clearedallowed_users bool + done bool + oldValue func(context.Context) (*Group, error) + predicates []predicate.Group } var _ ent.Mutation = (*GroupMutation)(nil) @@ -4976,6 +4978,76 @@ func (m *GroupMutation) ResetFallbackGroupID() { delete(m.clearedFields, group.FieldFallbackGroupID) } +// SetFallbackGroupIDOnInvalidRequest sets the "fallback_group_id_on_invalid_request" field. 
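// Setting an absolute value also discards any pending Add delta; the body
// below nils out addfallback_group_id_on_invalid_request.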
+func (m *GroupMutation) SetFallbackGroupIDOnInvalidRequest(i int64) { + m.fallback_group_id_on_invalid_request = &i + m.addfallback_group_id_on_invalid_request = nil +} + +// FallbackGroupIDOnInvalidRequest returns the value of the "fallback_group_id_on_invalid_request" field in the mutation. +func (m *GroupMutation) FallbackGroupIDOnInvalidRequest() (r int64, exists bool) { + v := m.fallback_group_id_on_invalid_request + if v == nil { + return + } + return *v, true +} + +// OldFallbackGroupIDOnInvalidRequest returns the old "fallback_group_id_on_invalid_request" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldFallbackGroupIDOnInvalidRequest(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFallbackGroupIDOnInvalidRequest is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFallbackGroupIDOnInvalidRequest requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFallbackGroupIDOnInvalidRequest: %w", err) + } + return oldValue.FallbackGroupIDOnInvalidRequest, nil +} + +// AddFallbackGroupIDOnInvalidRequest adds i to the "fallback_group_id_on_invalid_request" field. +func (m *GroupMutation) AddFallbackGroupIDOnInvalidRequest(i int64) { + if m.addfallback_group_id_on_invalid_request != nil { + *m.addfallback_group_id_on_invalid_request += i + } else { + m.addfallback_group_id_on_invalid_request = &i + } +} + +// AddedFallbackGroupIDOnInvalidRequest returns the value that was added to the "fallback_group_id_on_invalid_request" field in this mutation. +func (m *GroupMutation) AddedFallbackGroupIDOnInvalidRequest() (r int64, exists bool) { + v := m.addfallback_group_id_on_invalid_request + if v == nil { + return + } + return *v, true +} + +// ClearFallbackGroupIDOnInvalidRequest clears the value of the "fallback_group_id_on_invalid_request" field. +func (m *GroupMutation) ClearFallbackGroupIDOnInvalidRequest() { + m.fallback_group_id_on_invalid_request = nil + m.addfallback_group_id_on_invalid_request = nil + m.clearedFields[group.FieldFallbackGroupIDOnInvalidRequest] = struct{}{} +} + +// FallbackGroupIDOnInvalidRequestCleared returns if the "fallback_group_id_on_invalid_request" field was cleared in this mutation. +func (m *GroupMutation) FallbackGroupIDOnInvalidRequestCleared() bool { + _, ok := m.clearedFields[group.FieldFallbackGroupIDOnInvalidRequest] + return ok +} + +// ResetFallbackGroupIDOnInvalidRequest resets all changes to the "fallback_group_id_on_invalid_request" field. +func (m *GroupMutation) ResetFallbackGroupIDOnInvalidRequest() { + m.fallback_group_id_on_invalid_request = nil + m.addfallback_group_id_on_invalid_request = nil + delete(m.clearedFields, group.FieldFallbackGroupIDOnInvalidRequest) +} + // SetModelRouting sets the "model_routing" field. func (m *GroupMutation) SetModelRouting(value map[string][]int64) { m.model_routing = &value @@ -5419,7 +5491,7 @@ func (m *GroupMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
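// The backing slice is now sized for 22 fields, up from 21, to account for
// fallback_group_id_on_invalid_request.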
func (m *GroupMutation) Fields() []string { - fields := make([]string, 0, 21) + fields := make([]string, 0, 22) if m.created_at != nil { fields = append(fields, group.FieldCreatedAt) } @@ -5477,6 +5549,9 @@ func (m *GroupMutation) Fields() []string { if m.fallback_group_id != nil { fields = append(fields, group.FieldFallbackGroupID) } + if m.fallback_group_id_on_invalid_request != nil { + fields = append(fields, group.FieldFallbackGroupIDOnInvalidRequest) + } if m.model_routing != nil { fields = append(fields, group.FieldModelRouting) } @@ -5529,6 +5604,8 @@ func (m *GroupMutation) Field(name string) (ent.Value, bool) { return m.ClaudeCodeOnly() case group.FieldFallbackGroupID: return m.FallbackGroupID() + case group.FieldFallbackGroupIDOnInvalidRequest: + return m.FallbackGroupIDOnInvalidRequest() case group.FieldModelRouting: return m.ModelRouting() case group.FieldModelRoutingEnabled: @@ -5580,6 +5657,8 @@ func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldClaudeCodeOnly(ctx) case group.FieldFallbackGroupID: return m.OldFallbackGroupID(ctx) + case group.FieldFallbackGroupIDOnInvalidRequest: + return m.OldFallbackGroupIDOnInvalidRequest(ctx) case group.FieldModelRouting: return m.OldModelRouting(ctx) case group.FieldModelRoutingEnabled: @@ -5726,6 +5805,13 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { } m.SetFallbackGroupID(v) return nil + case group.FieldFallbackGroupIDOnInvalidRequest: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFallbackGroupIDOnInvalidRequest(v) + return nil case group.FieldModelRouting: v, ok := value.(map[string][]int64) if !ok { @@ -5775,6 +5861,9 @@ func (m *GroupMutation) AddedFields() []string { if m.addfallback_group_id != nil { fields = append(fields, group.FieldFallbackGroupID) } + if m.addfallback_group_id_on_invalid_request != nil { + fields = append(fields, group.FieldFallbackGroupIDOnInvalidRequest) + } return fields } @@ -5801,6 +5890,8 @@ func (m *GroupMutation) AddedField(name string) (ent.Value, bool) { return m.AddedImagePrice4k() case group.FieldFallbackGroupID: return m.AddedFallbackGroupID() + case group.FieldFallbackGroupIDOnInvalidRequest: + return m.AddedFallbackGroupIDOnInvalidRequest() } return nil, false } @@ -5873,6 +5964,13 @@ func (m *GroupMutation) AddField(name string, value ent.Value) error { } m.AddFallbackGroupID(v) return nil + case group.FieldFallbackGroupIDOnInvalidRequest: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddFallbackGroupIDOnInvalidRequest(v) + return nil } return fmt.Errorf("unknown Group numeric field %s", name) } @@ -5908,6 +6006,9 @@ func (m *GroupMutation) ClearedFields() []string { if m.FieldCleared(group.FieldFallbackGroupID) { fields = append(fields, group.FieldFallbackGroupID) } + if m.FieldCleared(group.FieldFallbackGroupIDOnInvalidRequest) { + fields = append(fields, group.FieldFallbackGroupIDOnInvalidRequest) + } if m.FieldCleared(group.FieldModelRouting) { fields = append(fields, group.FieldModelRouting) } @@ -5952,6 +6053,9 @@ func (m *GroupMutation) ClearField(name string) error { case group.FieldFallbackGroupID: m.ClearFallbackGroupID() return nil + case group.FieldFallbackGroupIDOnInvalidRequest: + m.ClearFallbackGroupIDOnInvalidRequest() + return nil case group.FieldModelRouting: m.ClearModelRouting() return nil @@ -6020,6 +6124,9 @@ func (m *GroupMutation) ResetField(name string) error { case 
group.FieldFallbackGroupID: m.ResetFallbackGroupID() return nil + case group.FieldFallbackGroupIDOnInvalidRequest: + m.ResetFallbackGroupIDOnInvalidRequest() + return nil case group.FieldModelRouting: m.ResetModelRouting() return nil diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 0cb10775..3ddb206d 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -281,7 +281,7 @@ func init() { // group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field. group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool) // groupDescModelRoutingEnabled is the schema descriptor for model_routing_enabled field. - groupDescModelRoutingEnabled := groupFields[17].Descriptor() + groupDescModelRoutingEnabled := groupFields[18].Descriptor() // group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field. group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool) promocodeFields := schema.PromoCode{}.Fields() diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 5d0a1e9a..51cae1a6 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -95,6 +95,10 @@ func (Group) Fields() []ent.Field { Optional(). Nillable(). Comment("非 Claude Code 请求降级使用的分组 ID"), + field.Int64("fallback_group_id_on_invalid_request"). + Optional(). + Nillable(). + Comment("无效请求兜底使用的分组 ID"), // 模型路由配置 (added by migration 040) field.JSON("model_routing", map[string][]int64{}). diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index f6780dee..8229a780 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -35,11 +35,12 @@ type CreateGroupRequest struct { WeeklyLimitUSD *float64 `json:"weekly_limit_usd"` MonthlyLimitUSD *float64 `json:"monthly_limit_usd"` // 图片生成计费配置(antigravity 和 gemini 平台使用,负数表示清除配置) - ImagePrice1K *float64 `json:"image_price_1k"` - ImagePrice2K *float64 `json:"image_price_2k"` - ImagePrice4K *float64 `json:"image_price_4k"` - ClaudeCodeOnly bool `json:"claude_code_only"` - FallbackGroupID *int64 `json:"fallback_group_id"` + ImagePrice1K *float64 `json:"image_price_1k"` + ImagePrice2K *float64 `json:"image_price_2k"` + ImagePrice4K *float64 `json:"image_price_4k"` + ClaudeCodeOnly bool `json:"claude_code_only"` + FallbackGroupID *int64 `json:"fallback_group_id"` + FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"` // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` @@ -58,11 +59,12 @@ type UpdateGroupRequest struct { WeeklyLimitUSD *float64 `json:"weekly_limit_usd"` MonthlyLimitUSD *float64 `json:"monthly_limit_usd"` // 图片生成计费配置(antigravity 和 gemini 平台使用,负数表示清除配置) - ImagePrice1K *float64 `json:"image_price_1k"` - ImagePrice2K *float64 `json:"image_price_2k"` - ImagePrice4K *float64 `json:"image_price_4k"` - ClaudeCodeOnly *bool `json:"claude_code_only"` - FallbackGroupID *int64 `json:"fallback_group_id"` + ImagePrice1K *float64 `json:"image_price_1k"` + ImagePrice2K *float64 `json:"image_price_2k"` + ImagePrice4K *float64 `json:"image_price_4k"` + ClaudeCodeOnly *bool `json:"claude_code_only"` + FallbackGroupID *int64 `json:"fallback_group_id"` + FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"` // 模型路由配置(仅 anthropic 平台使用) ModelRouting 
map[string][]int64 `json:"model_routing"` ModelRoutingEnabled *bool `json:"model_routing_enabled"` @@ -155,22 +157,23 @@ func (h *GroupHandler) Create(c *gin.Context) { } group, err := h.adminService.CreateGroup(c.Request.Context(), &service.CreateGroupInput{ - Name: req.Name, - Description: req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, - ModelRouting: req.ModelRouting, - ModelRoutingEnabled: req.ModelRoutingEnabled, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: req.FallbackGroupIDOnInvalidRequest, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, }) if err != nil { response.ErrorFrom(c, err) @@ -196,23 +199,24 @@ func (h *GroupHandler) Update(c *gin.Context) { } group, err := h.adminService.UpdateGroup(c.Request.Context(), groupID, &service.UpdateGroupInput{ - Name: req.Name, - Description: req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - Status: req.Status, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, - ModelRouting: req.ModelRouting, - ModelRoutingEnabled: req.ModelRoutingEnabled, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + Status: req.Status, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: req.FallbackGroupIDOnInvalidRequest, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, }) if err != nil { response.ErrorFrom(c, err) diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index f5bdd008..f1991c30 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -73,27 +73,28 @@ func GroupFromServiceShallow(g *service.Group) *Group { return nil } return &Group{ - ID: g.ID, - Name: g.Name, - Description: g.Description, - Platform: g.Platform, - RateMultiplier: g.RateMultiplier, - IsExclusive: g.IsExclusive, - Status: g.Status, - SubscriptionType: g.SubscriptionType, - DailyLimitUSD: g.DailyLimitUSD, - WeeklyLimitUSD: 
g.WeeklyLimitUSD, - MonthlyLimitUSD: g.MonthlyLimitUSD, - ImagePrice1K: g.ImagePrice1K, - ImagePrice2K: g.ImagePrice2K, - ImagePrice4K: g.ImagePrice4K, - ClaudeCodeOnly: g.ClaudeCodeOnly, - FallbackGroupID: g.FallbackGroupID, - ModelRouting: g.ModelRouting, - ModelRoutingEnabled: g.ModelRoutingEnabled, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, - AccountCount: g.AccountCount, + ID: g.ID, + Name: g.Name, + Description: g.Description, + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUSD, + WeeklyLimitUSD: g.WeeklyLimitUSD, + MonthlyLimitUSD: g.MonthlyLimitUSD, + ImagePrice1K: g.ImagePrice1K, + ImagePrice2K: g.ImagePrice2K, + ImagePrice4K: g.ImagePrice4K, + ClaudeCodeOnly: g.ClaudeCodeOnly, + FallbackGroupID: g.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + AccountCount: g.AccountCount, } } diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 4519143c..b425523b 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -57,6 +57,8 @@ type Group struct { // Claude Code 客户端限制 ClaudeCodeOnly bool `json:"claude_code_only"` FallbackGroupID *int64 `json:"fallback_group_id"` + // 无效请求兜底分组 + FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"` // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 6c8d9ebe..cd622a3b 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -14,6 +14,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" @@ -325,136 +326,186 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } } - maxAccountSwitches := h.maxAccountSwitches - switchCount := 0 - failedAccountIDs := make(map[int64]struct{}) - lastFailoverStatus := 0 + currentAPIKey := apiKey + currentSubscription := subscription + var fallbackGroupID *int64 + if apiKey.Group != nil { + fallbackGroupID = apiKey.Group.FallbackGroupIDOnInvalidRequest + } + fallbackUsed := false for { - // 选择支持该模型的账号 - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs, parsedReq.MetadataUserID) - if err != nil { - if len(failedAccountIDs) == 0 { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) - return - } - h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) - return - } - account := selection.Account - setOpsSelectedAccount(c, account.ID) + maxAccountSwitches := h.maxAccountSwitches + switchCount := 0 + failedAccountIDs := make(map[int64]struct{}) + lastFailoverStatus := 0 + retryWithFallback := false - // 检查预热请求拦截(在账号选择后、转发前检查) - if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { - if selection.Acquired && 
selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } - if reqStream { - sendMockWarmupStream(c, reqModel) - } else { - sendMockWarmupResponse(c, reqModel) - } - return - } - - // 3. 获取账号并发槽位 - accountReleaseFunc := selection.ReleaseFunc - if !selection.Acquired { - if selection.WaitPlan == nil { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) - return - } - accountWaitCounted := false - canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + for { + // 选择支持该模型的账号 + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), currentAPIKey.GroupID, sessionKey, reqModel, failedAccountIDs, parsedReq.MetadataUserID) if err != nil { - log.Printf("Increment account wait count failed: %v", err) - } else if !canWait { - log.Printf("Account wait queue full: account=%d", account.ID) - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) - return - } - if err == nil && canWait { - accountWaitCounted = true - } - defer func() { - if accountWaitCounted { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - } - }() - - accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - reqStream, - &streamStarted, - ) - if err != nil { - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - if accountWaitCounted { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - accountWaitCounted = false - } - if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil { - log.Printf("Bind sticky session failed: %v", err) - } - } - // 账号槽位/等待计数需要在超时或断开时安全回收 - accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) - - // 转发请求 - 根据账号平台分流 - var result *service.ForwardResult - if account.Platform == service.PlatformAntigravity { - result, err = h.antigravityGatewayService.Forward(c.Request.Context(), c, account, body) - } else { - result, err = h.gatewayService.Forward(c.Request.Context(), c, account, parsedReq) - } - if accountReleaseFunc != nil { - accountReleaseFunc() - } - if err != nil { - var failoverErr *service.UpstreamFailoverError - if errors.As(err, &failoverErr) { - failedAccountIDs[account.ID] = struct{}{} - lastFailoverStatus = failoverErr.StatusCode - if switchCount >= maxAccountSwitches { - h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) + if len(failedAccountIDs) == 0 { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) return } - switchCount++ - log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) - continue + h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) + return } - // 错误响应已在Forward中处理,这里只记录日志 - log.Printf("Account %d: Forward request failed: %v", account.ID, err) + account := selection.Account + setOpsSelectedAccount(c, account.ID) + + // 检查预热请求拦截(在账号选择后、转发前检查) + if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + if 
reqStream { + sendMockWarmupStream(c, reqModel) + } else { + sendMockWarmupResponse(c, reqModel) + } + return + } + + // 3. 获取账号并发槽位 + accountReleaseFunc := selection.ReleaseFunc + if !selection.Acquired { + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + accountWaitCounted := false + canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) + return + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + }() + + accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + &streamStarted, + ) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), currentAPIKey.GroupID, sessionKey, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } + } + // 账号槽位/等待计数需要在超时或断开时安全回收 + accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) + + // 转发请求 - 根据账号平台分流 + var result *service.ForwardResult + if account.Platform == service.PlatformAntigravity { + result, err = h.antigravityGatewayService.Forward(c.Request.Context(), c, account, body) + } else { + result, err = h.gatewayService.Forward(c.Request.Context(), c, account, parsedReq) + } + if accountReleaseFunc != nil { + accountReleaseFunc() + } + if err != nil { + var promptTooLongErr *service.PromptTooLongError + if errors.As(err, &promptTooLongErr) { + log.Printf("Prompt too long from antigravity: group=%d fallback_group_id=%v fallback_used=%v", currentAPIKey.GroupID, fallbackGroupID, fallbackUsed) + if !fallbackUsed && fallbackGroupID != nil && *fallbackGroupID > 0 { + fallbackGroup, err := h.gatewayService.ResolveGroupByID(c.Request.Context(), *fallbackGroupID) + if err != nil { + log.Printf("Resolve fallback group failed: %v", err) + _ = h.antigravityGatewayService.WriteMappedClaudeError(c, account, promptTooLongErr.StatusCode, promptTooLongErr.RequestID, promptTooLongErr.Body) + return + } + if fallbackGroup.Platform != service.PlatformAnthropic || + fallbackGroup.SubscriptionType == service.SubscriptionTypeSubscription || + fallbackGroup.FallbackGroupIDOnInvalidRequest != nil { + log.Printf("Fallback group invalid: group=%d platform=%s subscription=%s", fallbackGroup.ID, fallbackGroup.Platform, fallbackGroup.SubscriptionType) + _ = h.antigravityGatewayService.WriteMappedClaudeError(c, account, promptTooLongErr.StatusCode, promptTooLongErr.RequestID, promptTooLongErr.Body) + return + } + fallbackAPIKey := cloneAPIKeyWithGroup(apiKey, fallbackGroup) + if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), fallbackAPIKey.User, 
fallbackAPIKey, fallbackGroup, nil); err != nil { + status, code, message := billingErrorDetails(err) + h.handleStreamingAwareError(c, status, code, message, streamStarted) + return + } + // 兜底重试按“直接请求兜底分组”处理:清除强制平台,允许按分组平台调度 + ctx := context.WithValue(c.Request.Context(), ctxkey.ForcePlatform, "") + c.Request = c.Request.WithContext(ctx) + currentAPIKey = fallbackAPIKey + currentSubscription = nil + fallbackUsed = true + retryWithFallback = true + break + } + _ = h.antigravityGatewayService.WriteMappedClaudeError(c, account, promptTooLongErr.StatusCode, promptTooLongErr.RequestID, promptTooLongErr.Body) + return + } + var failoverErr *service.UpstreamFailoverError + if errors.As(err, &failoverErr) { + failedAccountIDs[account.ID] = struct{}{} + lastFailoverStatus = failoverErr.StatusCode + if switchCount >= maxAccountSwitches { + h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) + return + } + switchCount++ + log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) + continue + } + // 错误响应已在Forward中处理,这里只记录日志 + log.Printf("Account %d: Forward request failed: %v", account.ID, err) + return + } + + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + + // 异步记录使用量(subscription已在函数开头获取) + go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ + Result: result, + APIKey: currentAPIKey, + User: currentAPIKey.User, + Account: usedAccount, + Subscription: currentSubscription, + UserAgent: ua, + IPAddress: clientIP, + }); err != nil { + log.Printf("Record usage failed: %v", err) + } + }(result, account, userAgent, clientIP) return } - // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) - userAgent := c.GetHeader("User-Agent") - clientIP := ip.GetClientIP(c) - - // 异步记录使用量(subscription已在函数开头获取) - go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ - Result: result, - APIKey: apiKey, - User: apiKey.User, - Account: usedAccount, - Subscription: subscription, - UserAgent: ua, - IPAddress: clientIP, - }); err != nil { - log.Printf("Record usage failed: %v", err) - } - }(result, account, userAgent, clientIP) - return + if !retryWithFallback { + return + } } } @@ -518,6 +569,17 @@ func (h *GatewayHandler) AntigravityModels(c *gin.Context) { }) } +func cloneAPIKeyWithGroup(apiKey *service.APIKey, group *service.Group) *service.APIKey { + if apiKey == nil || group == nil { + return apiKey + } + cloned := *apiKey + groupID := group.ID + cloned.GroupID = &groupID + cloned.Group = group + return &cloned +} + // Usage handles getting account balance for CC Switch integration // GET /v1/usage func (h *GatewayHandler) Usage(c *gin.Context) { diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index ab890844..9938a36d 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -136,6 +136,7 @@ func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*se group.FieldImagePrice4k, group.FieldClaudeCodeOnly, group.FieldFallbackGroupID, + 
group.FieldFallbackGroupIDOnInvalidRequest, group.FieldModelRoutingEnabled, group.FieldModelRouting, ) @@ -406,28 +407,29 @@ func groupEntityToService(g *dbent.Group) *service.Group { return nil } return &service.Group{ - ID: g.ID, - Name: g.Name, - Description: derefString(g.Description), - Platform: g.Platform, - RateMultiplier: g.RateMultiplier, - IsExclusive: g.IsExclusive, - Status: g.Status, - Hydrated: true, - SubscriptionType: g.SubscriptionType, - DailyLimitUSD: g.DailyLimitUsd, - WeeklyLimitUSD: g.WeeklyLimitUsd, - MonthlyLimitUSD: g.MonthlyLimitUsd, - ImagePrice1K: g.ImagePrice1k, - ImagePrice2K: g.ImagePrice2k, - ImagePrice4K: g.ImagePrice4k, - DefaultValidityDays: g.DefaultValidityDays, - ClaudeCodeOnly: g.ClaudeCodeOnly, - FallbackGroupID: g.FallbackGroupID, - ModelRouting: g.ModelRouting, - ModelRoutingEnabled: g.ModelRoutingEnabled, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, + ID: g.ID, + Name: g.Name, + Description: derefString(g.Description), + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + Hydrated: true, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUsd, + WeeklyLimitUSD: g.WeeklyLimitUsd, + MonthlyLimitUSD: g.MonthlyLimitUsd, + ImagePrice1K: g.ImagePrice1k, + ImagePrice2K: g.ImagePrice2k, + ImagePrice4K: g.ImagePrice4k, + DefaultValidityDays: g.DefaultValidityDays, + ClaudeCodeOnly: g.ClaudeCodeOnly, + FallbackGroupID: g.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, } } diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 5c4d6cf4..f207f479 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -50,6 +50,7 @@ func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) er SetDefaultValidityDays(groupIn.DefaultValidityDays). SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). SetNillableFallbackGroupID(groupIn.FallbackGroupID). + SetNillableFallbackGroupIDOnInvalidRequest(groupIn.FallbackGroupIDOnInvalidRequest). 
SetModelRoutingEnabled(groupIn.ModelRoutingEnabled) // 设置模型路由配置 @@ -116,6 +117,12 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er } else { builder = builder.ClearFallbackGroupID() } + // 处理 FallbackGroupIDOnInvalidRequest:nil 时清除,否则设置 + if groupIn.FallbackGroupIDOnInvalidRequest != nil { + builder = builder.SetFallbackGroupIDOnInvalidRequest(*groupIn.FallbackGroupIDOnInvalidRequest) + } else { + builder = builder.ClearFallbackGroupIDOnInvalidRequest() + } // 处理 ModelRouting:nil 时清除,否则设置 if groupIn.ModelRouting != nil { diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 1b2c7ff4..12f01810 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -108,6 +108,8 @@ type CreateGroupInput struct { ImagePrice4K *float64 ClaudeCodeOnly bool // 仅允许 Claude Code 客户端 FallbackGroupID *int64 // 降级分组 ID + // 无效请求兜底分组 ID(仅 anthropic 平台使用) + FallbackGroupIDOnInvalidRequest *int64 // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled bool // 是否启用模型路由 @@ -130,6 +132,8 @@ type UpdateGroupInput struct { ImagePrice4K *float64 ClaudeCodeOnly *bool // 仅允许 Claude Code 客户端 FallbackGroupID *int64 // 降级分组 ID + // 无效请求兜底分组 ID(仅 anthropic 平台使用) + FallbackGroupIDOnInvalidRequest *int64 // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled *bool // 是否启用模型路由 @@ -572,24 +576,35 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn return nil, err } } + fallbackOnInvalidRequest := input.FallbackGroupIDOnInvalidRequest + if fallbackOnInvalidRequest != nil && *fallbackOnInvalidRequest <= 0 { + fallbackOnInvalidRequest = nil + } + // 校验无效请求兜底分组 + if fallbackOnInvalidRequest != nil { + if err := s.validateFallbackGroupOnInvalidRequest(ctx, 0, platform, subscriptionType, *fallbackOnInvalidRequest); err != nil { + return nil, err + } + } group := &Group{ - Name: input.Name, - Description: input.Description, - Platform: platform, - RateMultiplier: input.RateMultiplier, - IsExclusive: input.IsExclusive, - Status: StatusActive, - SubscriptionType: subscriptionType, - DailyLimitUSD: dailyLimit, - WeeklyLimitUSD: weeklyLimit, - MonthlyLimitUSD: monthlyLimit, - ImagePrice1K: imagePrice1K, - ImagePrice2K: imagePrice2K, - ImagePrice4K: imagePrice4K, - ClaudeCodeOnly: input.ClaudeCodeOnly, - FallbackGroupID: input.FallbackGroupID, - ModelRouting: input.ModelRouting, + Name: input.Name, + Description: input.Description, + Platform: platform, + RateMultiplier: input.RateMultiplier, + IsExclusive: input.IsExclusive, + Status: StatusActive, + SubscriptionType: subscriptionType, + DailyLimitUSD: dailyLimit, + WeeklyLimitUSD: weeklyLimit, + MonthlyLimitUSD: monthlyLimit, + ImagePrice1K: imagePrice1K, + ImagePrice2K: imagePrice2K, + ImagePrice4K: imagePrice4K, + ClaudeCodeOnly: input.ClaudeCodeOnly, + FallbackGroupID: input.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: fallbackOnInvalidRequest, + ModelRouting: input.ModelRouting, } if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err @@ -651,6 +666,37 @@ func (s *adminServiceImpl) validateFallbackGroup(ctx context.Context, currentGro } } +// validateFallbackGroupOnInvalidRequest 校验无效请求兜底分组的有效性 +// currentGroupID: 当前分组 ID(新建时为 0) +// platform/subscriptionType: 当前分组的有效平台/订阅类型 +// fallbackGroupID: 兜底分组 ID +func (s *adminServiceImpl) validateFallbackGroupOnInvalidRequest(ctx context.Context, currentGroupID int64, platform, subscriptionType string, 
fallbackGroupID int64) error { + if platform != PlatformAnthropic && platform != PlatformAntigravity { + return fmt.Errorf("invalid request fallback only supported for anthropic or antigravity groups") + } + if subscriptionType == SubscriptionTypeSubscription { + return fmt.Errorf("subscription groups cannot set invalid request fallback") + } + if currentGroupID > 0 && currentGroupID == fallbackGroupID { + return fmt.Errorf("cannot set self as invalid request fallback group") + } + + fallbackGroup, err := s.groupRepo.GetByIDLite(ctx, fallbackGroupID) + if err != nil { + return fmt.Errorf("fallback group not found: %w", err) + } + if fallbackGroup.Platform != PlatformAnthropic { + return fmt.Errorf("fallback group must be anthropic platform") + } + if fallbackGroup.SubscriptionType == SubscriptionTypeSubscription { + return fmt.Errorf("fallback group cannot be subscription type") + } + if fallbackGroup.FallbackGroupIDOnInvalidRequest != nil { + return fmt.Errorf("fallback group cannot have invalid request fallback configured") + } + return nil +} + func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *UpdateGroupInput) (*Group, error) { group, err := s.groupRepo.GetByID(ctx, id) if err != nil { @@ -717,6 +763,20 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd group.FallbackGroupID = nil } } + fallbackOnInvalidRequest := group.FallbackGroupIDOnInvalidRequest + if input.FallbackGroupIDOnInvalidRequest != nil { + if *input.FallbackGroupIDOnInvalidRequest > 0 { + fallbackOnInvalidRequest = input.FallbackGroupIDOnInvalidRequest + } else { + fallbackOnInvalidRequest = nil + } + } + if fallbackOnInvalidRequest != nil { + if err := s.validateFallbackGroupOnInvalidRequest(ctx, id, group.Platform, group.SubscriptionType, *fallbackOnInvalidRequest); err != nil { + return nil, err + } + } + group.FallbackGroupIDOnInvalidRequest = fallbackOnInvalidRequest // 模型路由配置 if input.ModelRouting != nil { diff --git a/backend/internal/service/admin_service_group_test.go b/backend/internal/service/admin_service_group_test.go index e0574e2e..1454dccd 100644 --- a/backend/internal/service/admin_service_group_test.go +++ b/backend/internal/service/admin_service_group_test.go @@ -378,3 +378,374 @@ func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int func (s *groupRepoStubForFallbackCycle) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { panic("unexpected DeleteAccountGroupsByGroupID call") } + +type groupRepoStubForInvalidRequestFallback struct { + groups map[int64]*Group + created *Group + updated *Group +} + +func (s *groupRepoStubForInvalidRequestFallback) Create(_ context.Context, g *Group) error { + s.created = g + return nil +} + +func (s *groupRepoStubForInvalidRequestFallback) Update(_ context.Context, g *Group) error { + s.updated = g + return nil +} + +func (s *groupRepoStubForInvalidRequestFallback) GetByID(ctx context.Context, id int64) (*Group, error) { + return s.GetByIDLite(ctx, id) +} + +func (s *groupRepoStubForInvalidRequestFallback) GetByIDLite(_ context.Context, id int64) (*Group, error) { + if g, ok := s.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (s *groupRepoStubForInvalidRequestFallback) Delete(_ context.Context, _ int64) error { + panic("unexpected Delete call") +} + +func (s *groupRepoStubForInvalidRequestFallback) DeleteCascade(_ context.Context, _ int64) ([]int64, error) { + panic("unexpected DeleteCascade call") +} + +func (s 
*groupRepoStubForInvalidRequestFallback) List(_ context.Context, _ pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *groupRepoStubForInvalidRequestFallback) ListWithFilters(_ context.Context, _ pagination.PaginationParams, _, _, _ string, _ *bool) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *groupRepoStubForInvalidRequestFallback) ListActive(_ context.Context) ([]Group, error) { + panic("unexpected ListActive call") +} + +func (s *groupRepoStubForInvalidRequestFallback) ListActiveByPlatform(_ context.Context, _ string) ([]Group, error) { + panic("unexpected ListActiveByPlatform call") +} + +func (s *groupRepoStubForInvalidRequestFallback) ExistsByName(_ context.Context, _ string) (bool, error) { + panic("unexpected ExistsByName call") +} + +func (s *groupRepoStubForInvalidRequestFallback) GetAccountCount(_ context.Context, _ int64) (int64, error) { + panic("unexpected GetAccountCount call") +} + +func (s *groupRepoStubForInvalidRequestFallback) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { + panic("unexpected DeleteAccountGroupsByGroupID call") +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackRejectsUnsupportedPlatform(t *testing.T) { + fallbackID := int64(10) + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformOpenAI, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid request fallback only supported for anthropic or antigravity groups") + require.Nil(t, repo.created) +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackRejectsSubscription(t *testing.T) { + fallbackID := int64(10) + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeSubscription, + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "subscription groups cannot set invalid request fallback") + require.Nil(t, repo.created) +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackRejectsFallbackGroup(t *testing.T) { + tests := []struct { + name string + fallback *Group + wantMessage string + }{ + { + name: "openai_target", + fallback: &Group{ID: 10, Platform: PlatformOpenAI, SubscriptionType: SubscriptionTypeStandard}, + wantMessage: "fallback group must be anthropic platform", + }, + { + name: "antigravity_target", + fallback: &Group{ID: 10, Platform: PlatformAntigravity, SubscriptionType: SubscriptionTypeStandard}, + wantMessage: "fallback group must be anthropic platform", + }, + { + name: "subscription_group", + fallback: &Group{ID: 10, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeSubscription}, + wantMessage: "fallback group cannot be subscription type", + }, + { + name: "nested_fallback", + fallback: &Group{ + ID: 10, + Platform: 
PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: func() *int64 { v := int64(99); return &v }(), + }, + wantMessage: "fallback group cannot have invalid request fallback configured", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + fallbackID := tc.fallback.ID + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + fallbackID: tc.fallback, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantMessage) + require.Nil(t, repo.created) + }) + } +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackNotFound(t *testing.T) { + fallbackID := int64(10) + repo := &groupRepoStubForInvalidRequestFallback{} + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "fallback group not found") + require.Nil(t, repo.created) +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackAllowsAntigravity(t *testing.T) { + fallbackID := int64(10) + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + group, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformAntigravity, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.created) + require.Equal(t, fallbackID, *repo.created.FallbackGroupIDOnInvalidRequest) +} + +func TestAdminService_CreateGroup_InvalidRequestFallbackClearsOnZero(t *testing.T) { + zero := int64(0) + repo := &groupRepoStubForInvalidRequestFallback{} + svc := &adminServiceImpl{groupRepo: repo} + + group, err := svc.CreateGroup(context.Background(), &CreateGroupInput{ + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + FallbackGroupIDOnInvalidRequest: &zero, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.created) + require.Nil(t, repo.created.FallbackGroupIDOnInvalidRequest) +} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackPlatformMismatch(t *testing.T) { + fallbackID := int64(10) + existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + FallbackGroupIDOnInvalidRequest: &fallbackID, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + Platform: PlatformOpenAI, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid request fallback only supported for anthropic or antigravity groups") + require.Nil(t, repo.updated) 
+} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackSubscriptionMismatch(t *testing.T) { + fallbackID := int64(10) + existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + FallbackGroupIDOnInvalidRequest: &fallbackID, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + SubscriptionType: SubscriptionTypeSubscription, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "subscription groups cannot set invalid request fallback") + require.Nil(t, repo.updated) +} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackClearsOnZero(t *testing.T) { + fallbackID := int64(10) + existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + FallbackGroupIDOnInvalidRequest: &fallbackID, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + clear := int64(0) + group, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + Platform: PlatformOpenAI, + FallbackGroupIDOnInvalidRequest: &clear, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.updated) + require.Nil(t, repo.updated.FallbackGroupIDOnInvalidRequest) +} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackRejectsFallbackGroup(t *testing.T) { + fallbackID := int64(10) + existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeSubscription}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + _, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "fallback group cannot be subscription type") + require.Nil(t, repo.updated) +} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackSetSuccess(t *testing.T) { + fallbackID := int64(10) + existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAnthropic, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + group, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.updated) + require.Equal(t, fallbackID, *repo.updated.FallbackGroupIDOnInvalidRequest) +} + +func TestAdminService_UpdateGroup_InvalidRequestFallbackAllowsAntigravity(t *testing.T) { + fallbackID := int64(10) + 
existing := &Group{ + ID: 1, + Name: "g1", + Platform: PlatformAntigravity, + SubscriptionType: SubscriptionTypeStandard, + Status: StatusActive, + } + repo := &groupRepoStubForInvalidRequestFallback{ + groups: map[int64]*Group{ + existing.ID: existing, + fallbackID: {ID: fallbackID, Platform: PlatformAnthropic, SubscriptionType: SubscriptionTypeStandard}, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + group, err := svc.UpdateGroup(context.Background(), existing.ID, &UpdateGroupInput{ + FallbackGroupIDOnInvalidRequest: &fallbackID, + }) + require.NoError(t, err) + require.NotNil(t, group) + require.NotNil(t, repo.updated) + require.Equal(t, fallbackID, *repo.updated.FallbackGroupIDOnInvalidRequest) +} diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 85e8eec7..d3c15418 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -62,6 +62,17 @@ type antigravityRetryLoopResult struct { resp *http.Response } +// PromptTooLongError 表示上游明确返回 prompt too long +type PromptTooLongError struct { + StatusCode int + RequestID string + Body []byte +} + +func (e *PromptTooLongError) Error() string { + return fmt.Sprintf("prompt too long: status=%d", e.StatusCode) +} + // antigravityRetryLoop 执行带 URL fallback 的重试循环 func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopResult, error) { availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() @@ -930,6 +941,39 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // 处理错误响应(重试后仍失败或不触发重试) if resp.StatusCode >= 400 { + if resp.StatusCode == http.StatusBadRequest { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + log.Printf("%s status=400 prompt_too_long=%v upstream_message=%q request_id=%s body=%s", prefix, isPromptTooLongError(respBody), upstreamMsg, resp.Header.Get("x-request-id"), truncateForLog(respBody, 500)) + } + if resp.StatusCode == http.StatusBadRequest && isPromptTooLongError(respBody) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "prompt_too_long", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &PromptTooLongError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get("x-request-id"), + Body: respBody, + } + } s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) if s.shouldFailoverUpstreamError(resp.StatusCode) { @@ -1019,21 +1063,55 @@ func isSignatureRelatedError(respBody []byte) bool { return false } +func isPromptTooLongError(respBody []byte) bool { + msg := 
strings.ToLower(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) + if msg == "" { + msg = strings.ToLower(string(respBody)) + } + return strings.Contains(msg, "prompt is too long") +} + func extractAntigravityErrorMessage(body []byte) string { var payload map[string]any if err := json.Unmarshal(body, &payload); err != nil { return "" } + parseNestedMessage := func(msg string) string { + trimmed := strings.TrimSpace(msg) + if trimmed == "" || !strings.HasPrefix(trimmed, "{") { + return "" + } + var nested map[string]any + if err := json.Unmarshal([]byte(trimmed), &nested); err != nil { + return "" + } + if errObj, ok := nested["error"].(map[string]any); ok { + if innerMsg, ok := errObj["message"].(string); ok && strings.TrimSpace(innerMsg) != "" { + return innerMsg + } + } + if innerMsg, ok := nested["message"].(string); ok && strings.TrimSpace(innerMsg) != "" { + return innerMsg + } + return "" + } + // Google-style: {"error": {"message": "..."}} if errObj, ok := payload["error"].(map[string]any); ok { if msg, ok := errObj["message"].(string); ok && strings.TrimSpace(msg) != "" { + if innerMsg := parseNestedMessage(msg); innerMsg != "" { + return innerMsg + } return msg } } // Fallback: top-level message if msg, ok := payload["message"].(string); ok && strings.TrimSpace(msg) != "" { + if innerMsg := parseNestedMessage(msg); innerMsg != "" { + return innerMsg + } return msg } @@ -2209,6 +2287,10 @@ func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, accou return fmt.Errorf("upstream error: %d message=%s", upstreamStatus, upstreamMsg) } +func (s *AntigravityGatewayService) WriteMappedClaudeError(c *gin.Context, account *Account, upstreamStatus int, upstreamRequestID string, body []byte) error { + return s.writeMappedClaudeError(c, account, upstreamStatus, upstreamRequestID, body) +} + func (s *AntigravityGatewayService) writeGoogleError(c *gin.Context, status int, message string) error { statusStr := "UNKNOWN" switch status { diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index 05ad9bbd..9c1fb415 100644 --- a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -1,10 +1,16 @@ package service import ( + "bytes" + "context" "encoding/json" + "io" + "net/http" + "net/http/httptest" "testing" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" ) @@ -81,3 +87,77 @@ func TestStripThinkingFromClaudeRequest_DoesNotDowngradeTools(t *testing.T) { require.Equal(t, "secret plan", blocks[0]["text"]) require.Equal(t, "tool_use", blocks[1]["type"]) } + +func TestIsPromptTooLongError(t *testing.T) { + require.True(t, isPromptTooLongError([]byte(`{"error":{"message":"Prompt is too long"}}`))) + require.True(t, isPromptTooLongError([]byte(`{"message":"Prompt is too long"}`))) + require.False(t, isPromptTooLongError([]byte(`{"error":{"message":"other"}}`))) +} + +type httpUpstreamStub struct { + resp *http.Response + err error +} + +func (s *httpUpstreamStub) Do(_ *http.Request, _ string, _ int64, _ int) (*http.Response, error) { + return s.resp, s.err +} + +func TestAntigravityGatewayService_Forward_PromptTooLong(t *testing.T) { + gin.SetMode(gin.TestMode) + writer := httptest.NewRecorder() + c, _ := gin.CreateTestContext(writer) + + body, err := json.Marshal(map[string]any{ + "model": "claude-opus-4-5", + "messages": []map[string]any{ + 
{"role": "user", "content": "hi"}, + }, + "max_tokens": 1, + "stream": false, + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/v1/messages", bytes.NewReader(body)) + c.Request = req + + respBody := []byte(`{"error":{"message":"Prompt is too long"}}`) + resp := &http.Response{ + StatusCode: http.StatusBadRequest, + Header: http.Header{"X-Request-Id": []string{"req-1"}}, + Body: io.NopCloser(bytes.NewReader(respBody)), + } + + svc := &AntigravityGatewayService{ + tokenProvider: &AntigravityTokenProvider{}, + httpUpstream: &httpUpstreamStub{resp: resp}, + } + + account := &Account{ + ID: 1, + Name: "acc-1", + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + Status: StatusActive, + Concurrency: 1, + Credentials: map[string]any{ + "access_token": "token", + }, + } + + result, err := svc.Forward(context.Background(), c, account, body) + require.Nil(t, result) + + var promptErr *PromptTooLongError + require.ErrorAs(t, err, &promptErr) + require.Equal(t, http.StatusBadRequest, promptErr.StatusCode) + require.Equal(t, "req-1", promptErr.RequestID) + require.NotEmpty(t, promptErr.Body) + + raw, ok := c.Get(OpsUpstreamErrorsKey) + require.True(t, ok) + events, ok := raw.([]*OpsUpstreamErrorEvent) + require.True(t, ok) + require.Len(t, events, 1) + require.Equal(t, "prompt_too_long", events[0].Kind) +} diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go index 5b476dbc..4b51fbbb 100644 --- a/backend/internal/service/api_key_auth_cache.go +++ b/backend/internal/service/api_key_auth_cache.go @@ -23,20 +23,21 @@ type APIKeyAuthUserSnapshot struct { // APIKeyAuthGroupSnapshot 分组快照 type APIKeyAuthGroupSnapshot struct { - ID int64 `json:"id"` - Name string `json:"name"` - Platform string `json:"platform"` - Status string `json:"status"` - SubscriptionType string `json:"subscription_type"` - RateMultiplier float64 `json:"rate_multiplier"` - DailyLimitUSD *float64 `json:"daily_limit_usd,omitempty"` - WeeklyLimitUSD *float64 `json:"weekly_limit_usd,omitempty"` - MonthlyLimitUSD *float64 `json:"monthly_limit_usd,omitempty"` - ImagePrice1K *float64 `json:"image_price_1k,omitempty"` - ImagePrice2K *float64 `json:"image_price_2k,omitempty"` - ImagePrice4K *float64 `json:"image_price_4k,omitempty"` - ClaudeCodeOnly bool `json:"claude_code_only"` - FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` + ID int64 `json:"id"` + Name string `json:"name"` + Platform string `json:"platform"` + Status string `json:"status"` + SubscriptionType string `json:"subscription_type"` + RateMultiplier float64 `json:"rate_multiplier"` + DailyLimitUSD *float64 `json:"daily_limit_usd,omitempty"` + WeeklyLimitUSD *float64 `json:"weekly_limit_usd,omitempty"` + MonthlyLimitUSD *float64 `json:"monthly_limit_usd,omitempty"` + ImagePrice1K *float64 `json:"image_price_1k,omitempty"` + ImagePrice2K *float64 `json:"image_price_2k,omitempty"` + ImagePrice4K *float64 `json:"image_price_4k,omitempty"` + ClaudeCodeOnly bool `json:"claude_code_only"` + FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` + FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request,omitempty"` // Model routing is used by gateway account selection, so it must be part of auth cache snapshot. // Only anthropic groups use these fields; others may leave them empty. 
diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go index 521f1da5..8b74e7aa 100644 --- a/backend/internal/service/api_key_auth_cache_impl.go +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -207,22 +207,23 @@ func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { } if apiKey.Group != nil { snapshot.Group = &APIKeyAuthGroupSnapshot{ - ID: apiKey.Group.ID, - Name: apiKey.Group.Name, - Platform: apiKey.Group.Platform, - Status: apiKey.Group.Status, - SubscriptionType: apiKey.Group.SubscriptionType, - RateMultiplier: apiKey.Group.RateMultiplier, - DailyLimitUSD: apiKey.Group.DailyLimitUSD, - WeeklyLimitUSD: apiKey.Group.WeeklyLimitUSD, - MonthlyLimitUSD: apiKey.Group.MonthlyLimitUSD, - ImagePrice1K: apiKey.Group.ImagePrice1K, - ImagePrice2K: apiKey.Group.ImagePrice2K, - ImagePrice4K: apiKey.Group.ImagePrice4K, - ClaudeCodeOnly: apiKey.Group.ClaudeCodeOnly, - FallbackGroupID: apiKey.Group.FallbackGroupID, - ModelRouting: apiKey.Group.ModelRouting, - ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, + ID: apiKey.Group.ID, + Name: apiKey.Group.Name, + Platform: apiKey.Group.Platform, + Status: apiKey.Group.Status, + SubscriptionType: apiKey.Group.SubscriptionType, + RateMultiplier: apiKey.Group.RateMultiplier, + DailyLimitUSD: apiKey.Group.DailyLimitUSD, + WeeklyLimitUSD: apiKey.Group.WeeklyLimitUSD, + MonthlyLimitUSD: apiKey.Group.MonthlyLimitUSD, + ImagePrice1K: apiKey.Group.ImagePrice1K, + ImagePrice2K: apiKey.Group.ImagePrice2K, + ImagePrice4K: apiKey.Group.ImagePrice4K, + ClaudeCodeOnly: apiKey.Group.ClaudeCodeOnly, + FallbackGroupID: apiKey.Group.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: apiKey.Group.FallbackGroupIDOnInvalidRequest, + ModelRouting: apiKey.Group.ModelRouting, + ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, } } return snapshot @@ -250,23 +251,24 @@ func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapsho } if snapshot.Group != nil { apiKey.Group = &Group{ - ID: snapshot.Group.ID, - Name: snapshot.Group.Name, - Platform: snapshot.Group.Platform, - Status: snapshot.Group.Status, - Hydrated: true, - SubscriptionType: snapshot.Group.SubscriptionType, - RateMultiplier: snapshot.Group.RateMultiplier, - DailyLimitUSD: snapshot.Group.DailyLimitUSD, - WeeklyLimitUSD: snapshot.Group.WeeklyLimitUSD, - MonthlyLimitUSD: snapshot.Group.MonthlyLimitUSD, - ImagePrice1K: snapshot.Group.ImagePrice1K, - ImagePrice2K: snapshot.Group.ImagePrice2K, - ImagePrice4K: snapshot.Group.ImagePrice4K, - ClaudeCodeOnly: snapshot.Group.ClaudeCodeOnly, - FallbackGroupID: snapshot.Group.FallbackGroupID, - ModelRouting: snapshot.Group.ModelRouting, - ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, + ID: snapshot.Group.ID, + Name: snapshot.Group.Name, + Platform: snapshot.Group.Platform, + Status: snapshot.Group.Status, + Hydrated: true, + SubscriptionType: snapshot.Group.SubscriptionType, + RateMultiplier: snapshot.Group.RateMultiplier, + DailyLimitUSD: snapshot.Group.DailyLimitUSD, + WeeklyLimitUSD: snapshot.Group.WeeklyLimitUSD, + MonthlyLimitUSD: snapshot.Group.MonthlyLimitUSD, + ImagePrice1K: snapshot.Group.ImagePrice1K, + ImagePrice2K: snapshot.Group.ImagePrice2K, + ImagePrice4K: snapshot.Group.ImagePrice4K, + ClaudeCodeOnly: snapshot.Group.ClaudeCodeOnly, + FallbackGroupID: snapshot.Group.FallbackGroupID, + FallbackGroupIDOnInvalidRequest: snapshot.Group.FallbackGroupIDOnInvalidRequest, + ModelRouting: snapshot.Group.ModelRouting, + 
ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, } } return apiKey diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 9cb15e4a..a7ded8a9 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -55,6 +55,15 @@ func shortSessionHash(sessionHash string) string { return sessionHash[:8] } +func normalizeClaudeModelForAnthropic(requestedModel string) string { + for _, prefix := range anthropicPrefixMappings { + if strings.HasPrefix(requestedModel, prefix) { + return prefix + } + } + return requestedModel +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). var ( @@ -71,6 +80,12 @@ var ( "You are a file search specialist for Claude Code", // Explore Agent 版 "You are a helpful AI assistant tasked with summarizing conversations", // Compact 版 } + + anthropicPrefixMappings = []string{ + "claude-opus-4-5", + "claude-haiku-4-5", + "claude-sonnet-4-5", + } ) // ErrClaudeCodeOnly 表示分组仅允许 Claude Code 客户端访问 @@ -951,6 +966,10 @@ func (s *GatewayService) resolveGroupByID(ctx context.Context, groupID int64) (* return group, nil } +func (s *GatewayService) ResolveGroupByID(ctx context.Context, groupID int64) (*Group, error) { + return s.resolveGroupByID(ctx, groupID) +} + func (s *GatewayService) routingAccountIDsForRequest(ctx context.Context, groupID *int64, requestedModel string, platform string) []int64 { if groupID == nil || requestedModel == "" || platform != PlatformAnthropic { return nil @@ -1016,7 +1035,7 @@ func (s *GatewayService) checkClaudeCodeRestriction(ctx context.Context, groupID } // 强制平台模式不检查 Claude Code 限制 - if _, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string); hasForcePlatform { + if forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string); hasForcePlatform && forcePlatform != "" { return nil, groupID, nil } @@ -1719,6 +1738,9 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo // Antigravity 平台使用专门的模型支持检查 return IsAntigravityModelSupported(requestedModel) } + if account.Platform == PlatformAnthropic { + requestedModel = normalizeClaudeModelForAnthropic(requestedModel) + } // 其他平台使用账户的模型支持检查 return account.IsModelSupported(requestedModel) } @@ -2115,17 +2137,29 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 强制执行 cache_control 块数量限制(最多 4 个) body = enforceCacheControlLimit(body) - // 应用模型映射(仅对apikey类型账号) + // 应用模型映射(APIKey 明确映射优先,其次使用 Anthropic 前缀映射) originalModel := reqModel + mappedModel := reqModel + mappingSource := "" if account.Type == AccountTypeAPIKey { - mappedModel := account.GetMappedModel(reqModel) + mappedModel = account.GetMappedModel(reqModel) if mappedModel != reqModel { - // 替换请求体中的模型名 - body = s.replaceModelInBody(body, mappedModel) - reqModel = mappedModel - log.Printf("Model mapping applied: %s -> %s (account: %s)", originalModel, mappedModel, account.Name) + mappingSource = "account" } } + if mappingSource == "" && account.Platform == PlatformAnthropic { + normalized := normalizeClaudeModelForAnthropic(reqModel) + if normalized != reqModel { + mappedModel = normalized + mappingSource = "prefix" + } + } + if mappedModel != reqModel { + // 替换请求体中的模型名 + body = s.replaceModelInBody(body, mappedModel) + reqModel = mappedModel + log.Printf("Model mapping applied: %s -> %s (account: %s, source=%s)", originalModel, mappedModel, account.Name, 
mappingSource) + } // 获取凭证 token, tokenType, err := s.GetAccessToken(ctx, account) @@ -3426,16 +3460,28 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, return nil } - // 应用模型映射(仅对 apikey 类型账号) - if account.Type == AccountTypeAPIKey { - if reqModel != "" { - mappedModel := account.GetMappedModel(reqModel) + // 应用模型映射(APIKey 明确映射优先,其次使用 Anthropic 前缀映射) + if reqModel != "" { + mappedModel := reqModel + mappingSource := "" + if account.Type == AccountTypeAPIKey { + mappedModel = account.GetMappedModel(reqModel) if mappedModel != reqModel { - body = s.replaceModelInBody(body, mappedModel) - reqModel = mappedModel - log.Printf("CountTokens model mapping applied: %s -> %s (account: %s)", parsed.Model, mappedModel, account.Name) + mappingSource = "account" } } + if mappingSource == "" && account.Platform == PlatformAnthropic { + normalized := normalizeClaudeModelForAnthropic(reqModel) + if normalized != reqModel { + mappedModel = normalized + mappingSource = "prefix" + } + } + if mappedModel != reqModel { + body = s.replaceModelInBody(body, mappedModel) + reqModel = mappedModel + log.Printf("CountTokens model mapping applied: %s -> %s (account: %s, source=%s)", parsed.Model, mappedModel, account.Name, mappingSource) + } } // 获取凭证 diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go index d6d1269b..9140b6d9 100644 --- a/backend/internal/service/group.go +++ b/backend/internal/service/group.go @@ -29,6 +29,8 @@ type Group struct { // Claude Code 客户端限制 ClaudeCodeOnly bool FallbackGroupID *int64 + // 无效请求兜底分组(仅 anthropic 平台使用) + FallbackGroupIDOnInvalidRequest *int64 // 模型路由配置 // key: 模型匹配模式(支持 * 通配符,如 "claude-opus-*") diff --git a/backend/migrations/043_add_group_invalid_request_fallback.sql b/backend/migrations/043_add_group_invalid_request_fallback.sql new file mode 100644 index 00000000..1c792704 --- /dev/null +++ b/backend/migrations/043_add_group_invalid_request_fallback.sql @@ -0,0 +1,13 @@ +-- 043_add_group_invalid_request_fallback.sql +-- 添加无效请求兜底分组配置 + +-- 添加 fallback_group_id_on_invalid_request 字段:无效请求兜底使用的分组 +ALTER TABLE groups +ADD COLUMN IF NOT EXISTS fallback_group_id_on_invalid_request BIGINT REFERENCES groups(id) ON DELETE SET NULL; + +-- 添加索引优化查询 +CREATE INDEX IF NOT EXISTS idx_groups_fallback_group_id_on_invalid_request +ON groups(fallback_group_id_on_invalid_request) WHERE deleted_at IS NULL AND fallback_group_id_on_invalid_request IS NOT NULL; + +-- 添加字段注释 +COMMENT ON COLUMN groups.fallback_group_id_on_invalid_request IS '无效请求兜底使用的分组 ID'; diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index abbd4ff6..2a4ff1c6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -919,6 +919,11 @@ export default { fallbackHint: 'Non-Claude Code requests will use this group. Leave empty to reject directly.', noFallback: 'No Fallback (Reject)' }, + invalidRequestFallback: { + title: 'Invalid Request Fallback Group', + hint: 'Triggered only when upstream explicitly returns prompt too long. Leave empty to disable fallback.', + noFallback: 'No Fallback' + }, modelRouting: { title: 'Model Routing', tooltip: 'Configure specific model requests to be routed to designated accounts. 
Supports wildcard matching, e.g., claude-opus-* matches all opus models.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 1b398e7a..d8c13cfc 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -995,6 +995,11 @@ export default { fallbackHint: '非 Claude Code 请求将使用此分组,留空则直接拒绝', noFallback: '不降级(直接拒绝)' }, + invalidRequestFallback: { + title: '无效请求兜底分组', + hint: '仅当上游明确返回 prompt too long 时才会触发,留空表示不兜底', + noFallback: '不兜底' + }, modelRouting: { title: '模型路由配置', tooltip: '配置特定模型请求优先路由到指定账号。支持通配符匹配,如 claude-opus-* 匹配所有 opus 模型。', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 6fa57c26..fcd3748f 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -269,6 +269,7 @@ export interface Group { // Claude Code 客户端限制 claude_code_only: boolean fallback_group_id: number | null + fallback_group_id_on_invalid_request: number | null // 模型路由配置(仅 anthropic 平台使用) model_routing: Record | null model_routing_enabled: boolean @@ -322,6 +323,7 @@ export interface CreateGroupRequest { image_price_4k?: number | null claude_code_only?: boolean fallback_group_id?: number | null + fallback_group_id_on_invalid_request?: number | null } export interface UpdateGroupRequest { @@ -340,6 +342,7 @@ export interface UpdateGroupRequest { image_price_4k?: number | null claude_code_only?: boolean fallback_group_id?: number | null + fallback_group_id_on_invalid_request?: number | null } // ==================== Account & Proxy Types ==================== diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index 96457172..f3a407d7 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -460,6 +460,20 @@
+              <!-- [template markup lost in extraction: this hunk adds the
+                   invalid-request fallback group select, mirroring the
+                   existing fallback-group select and bound to
+                   fallback_group_id_on_invalid_request] -->
+              {{ t('admin.groups.invalidRequestFallback.hint') }}
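Note on the clear-vs-keep convention used by the script hunk below: the frontend sends 0 when the select is cleared, omits nothing (null is mapped to 0 before submit), and UpdateGroup on the backend normalizes non-positive IDs to nil while a missing field keeps the stored value. A small sketch of that three-state handling; the helper names are illustrative, not part of the patch:

```go
package main

import "fmt"

// resolveFallback models UpdateGroup's handling of FallbackGroupIDOnInvalidRequest:
// nil input keeps the current value, a positive ID replaces it, and 0 (what the
// UI sends for "no fallback") clears it.
func resolveFallback(current, input *int64) *int64 {
	if input == nil {
		return current // field absent from the update payload: keep
	}
	if *input > 0 {
		return input // set
	}
	return nil // 0 or negative clears the fallback
}

func describe(v *int64) string {
	if v == nil {
		return "unset"
	}
	return fmt.Sprintf("group %d", *v)
}

func main() {
	ten, zero := int64(10), int64(0)
	fmt.Println(describe(resolveFallback(&ten, nil)))   // group 10 (omitted: keep)
	fmt.Println(describe(resolveFallback(&ten, &zero))) // unset (0 clears)
	fmt.Println(describe(resolveFallback(nil, &ten)))   // group 10 (set)
}
```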
@@ -1202,6 +1230,44 @@ const fallbackGroupOptionsForEdit = computed(() => { return options }) +// 无效请求兜底分组选项(创建时)- 仅包含 anthropic 平台、非订阅且未配置兜底的分组 +const invalidRequestFallbackOptions = computed(() => { + const options: { value: number | null; label: string }[] = [ + { value: null, label: t('admin.groups.invalidRequestFallback.noFallback') } + ] + const eligibleGroups = groups.value.filter( + (g) => + g.platform === 'anthropic' && + g.status === 'active' && + g.subscription_type !== 'subscription' && + g.fallback_group_id_on_invalid_request === null + ) + eligibleGroups.forEach((g) => { + options.push({ value: g.id, label: g.name }) + }) + return options +}) + +// 无效请求兜底分组选项(编辑时)- 排除自身 +const invalidRequestFallbackOptionsForEdit = computed(() => { + const options: { value: number | null; label: string }[] = [ + { value: null, label: t('admin.groups.invalidRequestFallback.noFallback') } + ] + const currentId = editingGroup.value?.id + const eligibleGroups = groups.value.filter( + (g) => + g.platform === 'anthropic' && + g.status === 'active' && + g.subscription_type !== 'subscription' && + g.fallback_group_id_on_invalid_request === null && + g.id !== currentId + ) + eligibleGroups.forEach((g) => { + options.push({ value: g.id, label: g.name }) + }) + return options +}) + const groups = ref([]) const loading = ref(false) const searchQuery = ref('') @@ -1243,6 +1309,7 @@ const createForm = reactive({ // Claude Code 客户端限制(仅 anthropic 平台使用) claude_code_only: false, fallback_group_id: null as number | null, + fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 model_routing_enabled: false }) @@ -1414,6 +1481,7 @@ const editForm = reactive({ // Claude Code 客户端限制(仅 anthropic 平台使用) claude_code_only: false, fallback_group_id: null as number | null, + fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 model_routing_enabled: false }) @@ -1497,6 +1565,7 @@ const closeCreateModal = () => { createForm.image_price_4k = null createForm.claude_code_only = false createForm.fallback_group_id = null + createForm.fallback_group_id_on_invalid_request = null createModelRoutingRules.value = [] } @@ -1546,6 +1615,7 @@ const handleEdit = async (group: Group) => { editForm.image_price_4k = group.image_price_4k editForm.claude_code_only = group.claude_code_only || false editForm.fallback_group_id = group.fallback_group_id + editForm.fallback_group_id_on_invalid_request = group.fallback_group_id_on_invalid_request editForm.model_routing_enabled = group.model_routing_enabled || false // 加载模型路由规则(异步加载账号名称) editModelRoutingRules.value = await convertApiFormatToRoutingRules(group.model_routing) @@ -1571,6 +1641,10 @@ const handleUpdateGroup = async () => { const payload = { ...editForm, fallback_group_id: editForm.fallback_group_id === null ? 0 : editForm.fallback_group_id, + fallback_group_id_on_invalid_request: + editForm.fallback_group_id_on_invalid_request === null + ? 
0 + : editForm.fallback_group_id_on_invalid_request, model_routing: convertRoutingRulesToApiFormat(editModelRoutingRules.value) } await adminAPI.groups.update(editingGroup.value.id, payload) @@ -1612,6 +1686,16 @@ watch( if (newVal === 'subscription') { createForm.rate_multiplier = 1.0 createForm.is_exclusive = true + createForm.fallback_group_id_on_invalid_request = null + } + } +) + +watch( + () => createForm.platform, + (newVal) => { + if (!['anthropic', 'antigravity'].includes(newVal)) { + createForm.fallback_group_id_on_invalid_request = null } } ) From e316a923d447a0217dc7b668d815e613c8bfa69e Mon Sep 17 00:00:00 2001 From: song Date: Sat, 24 Jan 2026 01:14:44 +0800 Subject: [PATCH 037/214] fix(ops): count failover kinds with suffix --- backend/internal/repository/ops_repo_trends.go | 2 +- backend/internal/service/ops_metrics_collector.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/internal/repository/ops_repo_trends.go b/backend/internal/repository/ops_repo_trends.go index 3be490dd..14394ed8 100644 --- a/backend/internal/repository/ops_repo_trends.go +++ b/backend/internal/repository/ops_repo_trends.go @@ -59,7 +59,7 @@ error_buckets AS ( switch_buckets AS ( SELECT ` + errorBucketExpr + ` AS bucket, COALESCE(SUM(CASE - WHEN ev->>'kind' IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1 + WHEN split_part(ev->>'kind', ':', 1) IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1 ELSE 0 END), 0) AS switch_count FROM ops_error_logs diff --git a/backend/internal/service/ops_metrics_collector.go b/backend/internal/service/ops_metrics_collector.go index 73ad1fb0..a799d01b 100644 --- a/backend/internal/service/ops_metrics_collector.go +++ b/backend/internal/service/ops_metrics_collector.go @@ -561,7 +561,7 @@ func (c *OpsMetricsCollector) queryAccountSwitchCount(ctx context.Context, start q := ` SELECT COALESCE(SUM(CASE - WHEN ev->>'kind' IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1 + WHEN split_part(ev->>'kind', ':', 1) IN ('failover', 'retry_exhausted_failover', 'failover_on_400') THEN 1 ELSE 0 END), 0) AS switch_count FROM ops_error_logs o From 4b57e80e6a2570fa89d3ff4516c807b55cc3668b Mon Sep 17 00:00:00 2001 From: song Date: Mon, 26 Jan 2026 23:40:48 +0800 Subject: [PATCH 038/214] =?UTF-8?q?fix:=20jsonb=5Fset=20=E5=B5=8C=E5=A5=97?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E6=97=A0=E6=B3=95=E5=88=9B=E5=BB=BA=E5=A4=9A?= =?UTF-8?q?=E5=B1=82=20key=20=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PostgreSQL jsonb_set 在 create_if_missing=true 时无法一次性创建多层嵌套路径。 例如设置 {antigravity_quota_scopes,gemini_image} 时,如果 antigravity_quota_scopes 不存在, jsonb_set 不会自动创建外层 key,导致更新静默失败(affected=1 但数据未变)。 修复方案:嵌套两次 jsonb_set,先确保外层 key 存在,再设置内层值。 影响范围: - SetAntigravityQuotaScopeLimit: Antigravity 平台按模型 scope 限流 - SetModelRateLimit: Anthropic 平台 Sonnet 模型限流 --- backend/internal/repository/account_repo.go | 29 ++++++++++++++++----- 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 440223eb..73f1cd97 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -809,12 +809,21 @@ func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, i return err } - path := "{antigravity_quota_scopes," + string(scope) + "}" + scopeKey := string(scope) client := clientFromContext(ctx, r.client) result, 
err := client.ExecContext( ctx, - "UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW(), last_used_at = NOW() WHERE id = $3 AND deleted_at IS NULL", - path, + `UPDATE accounts SET + extra = jsonb_set( + jsonb_set(COALESCE(extra, '{}'::jsonb), '{antigravity_quota_scopes}'::text[], COALESCE(extra->'antigravity_quota_scopes', '{}'::jsonb), true), + ARRAY['antigravity_quota_scopes', $1]::text[], + $2::jsonb, + true + ), + updated_at = NOW(), + last_used_at = NOW() + WHERE id = $3 AND deleted_at IS NULL`, + scopeKey, raw, id, ) @@ -829,6 +838,7 @@ func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, i if affected == 0 { return service.ErrAccountNotFound } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { log.Printf("[SchedulerOutbox] enqueue quota scope failed: account=%d err=%v", id, err) } @@ -849,12 +859,19 @@ func (r *accountRepository) SetModelRateLimit(ctx context.Context, id int64, sco return err } - path := "{model_rate_limits," + scope + "}" client := clientFromContext(ctx, r.client) result, err := client.ExecContext( ctx, - "UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL", - path, + `UPDATE accounts SET + extra = jsonb_set( + jsonb_set(COALESCE(extra, '{}'::jsonb), '{model_rate_limits}'::text[], COALESCE(extra->'model_rate_limits', '{}'::jsonb), true), + ARRAY['model_rate_limits', $1]::text[], + $2::jsonb, + true + ), + updated_at = NOW() + WHERE id = $3 AND deleted_at IS NULL`, + scope, raw, id, ) From 7cea6b6fc97940f007322e89f8360959ddd31776 Mon Sep 17 00:00:00 2001 From: song Date: Mon, 26 Jan 2026 20:51:40 +0800 Subject: [PATCH 039/214] =?UTF-8?q?feat(gemini):=20=E4=B8=BA=20Gemini=20?= =?UTF-8?q?=E5=8E=9F=E7=94=9F=E5=B9=B3=E5=8F=B0=E6=B7=BB=E5=8A=A0=E5=9B=BE?= =?UTF-8?q?=E7=89=87=E8=AE=A1=E8=B4=B9=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 对齐 Antigravity 平台的图片计费逻辑: - 添加 extractImageSize() 方法提取图片尺寸 - Forward() 和 ForwardNative() 返回 ImageCount/ImageSize - 支持分组自定义图片价格和倍率 --- .../service/gemini_messages_compat_service.go | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 75de90f2..1879a94c 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -800,6 +800,13 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex } } + // 图片生成计费 + imageCount := 0 + imageSize := s.extractImageSize(body) + if isImageGenerationModel(originalModel) { + imageCount = 1 + } + return &ForwardResult{ RequestID: requestID, Usage: *usage, @@ -807,6 +814,8 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex Stream: req.Stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, + ImageCount: imageCount, + ImageSize: imageSize, }, nil } @@ -1240,6 +1249,13 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. 
usage = &ClaudeUsage{} } + // 图片生成计费 + imageCount := 0 + imageSize := s.extractImageSize(body) + if isImageGenerationModel(originalModel) { + imageCount = 1 + } + return &ForwardResult{ RequestID: requestID, Usage: *usage, @@ -1247,6 +1263,8 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. Stream: stream, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, + ImageCount: imageCount, + ImageSize: imageSize, }, nil } @@ -2816,3 +2834,26 @@ func convertClaudeGenerationConfig(req map[string]any) map[string]any { } return out } + +// extractImageSize 从 Gemini 请求中提取 image_size 参数 +func (s *GeminiMessagesCompatService) extractImageSize(body []byte) string { + var req struct { + GenerationConfig *struct { + ImageConfig *struct { + ImageSize string `json:"imageSize"` + } `json:"imageConfig"` + } `json:"generationConfig"` + } + if err := json.Unmarshal(body, &req); err != nil { + return "2K" + } + + if req.GenerationConfig != nil && req.GenerationConfig.ImageConfig != nil { + size := strings.ToUpper(strings.TrimSpace(req.GenerationConfig.ImageConfig.ImageSize)) + if size == "1K" || size == "2K" || size == "4K" { + return size + } + } + + return "2K" +} From 08d6dc5227ea89e16b5c5c40a32e720172e2c037 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 27 Jan 2026 09:34:10 +0800 Subject: [PATCH 040/214] =?UTF-8?q?feat(ops):=20=E8=BF=90=E7=BB=B4?= =?UTF-8?q?=E7=95=8C=E9=9D=A2=E5=B1=95=E7=A4=BA=20Antigravity=20=E8=B4=A6?= =?UTF-8?q?=E5=8F=B7=20scope=20=E7=BA=A7=E5=88=AB=E9=99=90=E6=B5=81?= =?UTF-8?q?=E7=BB=9F=E8=AE=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 在运维监控的并发/排队卡片中,为 Antigravity 平台账号显示各 scope (claude/gemini_text/gemini_image) 的限流数量统计,便于管理员了解 哪些 scope 正在被限流。 --- .../service/antigravity_quota_scope.go | 27 +++++++++++++ .../service/ops_account_availability.go | 21 ++++++++++ .../internal/service/ops_realtime_models.go | 39 ++++++++++--------- frontend/src/api/admin/ops.ts | 3 ++ frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + .../ops/components/OpsConcurrencyCard.vue | 24 ++++++++++++ 7 files changed, 98 insertions(+), 18 deletions(-) diff --git a/backend/internal/service/antigravity_quota_scope.go b/backend/internal/service/antigravity_quota_scope.go index a3b2ec66..34cd9a4c 100644 --- a/backend/internal/service/antigravity_quota_scope.go +++ b/backend/internal/service/antigravity_quota_scope.go @@ -89,3 +89,30 @@ func (a *Account) antigravityQuotaScopeResetAt(scope AntigravityQuotaScope) *tim } return &resetAt } + +var antigravityAllScopes = []AntigravityQuotaScope{ + AntigravityQuotaScopeClaude, + AntigravityQuotaScopeGeminiText, + AntigravityQuotaScopeGeminiImage, +} + +func (a *Account) GetAntigravityScopeRateLimits() map[string]int64 { + if a == nil || a.Platform != PlatformAntigravity { + return nil + } + now := time.Now() + result := make(map[string]int64) + for _, scope := range antigravityAllScopes { + resetAt := a.antigravityQuotaScopeResetAt(scope) + if resetAt != nil && now.Before(*resetAt) { + remainingSec := int64(time.Until(*resetAt).Seconds()) + if remainingSec > 0 { + result[string(scope)] = remainingSec + } + } + } + if len(result) == 0 { + return nil + } + return result +} diff --git a/backend/internal/service/ops_account_availability.go b/backend/internal/service/ops_account_availability.go index da66ec4d..9be06c15 100644 --- a/backend/internal/service/ops_account_availability.go +++ b/backend/internal/service/ops_account_availability.go @@ -67,6 +67,8 @@ func (s 
*OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi isAvailable := acc.Status == StatusActive && acc.Schedulable && !isRateLimited && !isOverloaded && !isTempUnsched + scopeRateLimits := acc.GetAntigravityScopeRateLimits() + if acc.Platform != "" { if _, ok := platform[acc.Platform]; !ok { platform[acc.Platform] = &PlatformAvailability{ @@ -84,6 +86,14 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi if hasError { p.ErrorCount++ } + if len(scopeRateLimits) > 0 { + if p.ScopeRateLimitCount == nil { + p.ScopeRateLimitCount = make(map[string]int64) + } + for scope := range scopeRateLimits { + p.ScopeRateLimitCount[scope]++ + } + } } for _, grp := range acc.Groups { @@ -108,6 +118,14 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi if hasError { g.ErrorCount++ } + if len(scopeRateLimits) > 0 { + if g.ScopeRateLimitCount == nil { + g.ScopeRateLimitCount = make(map[string]int64) + } + for scope := range scopeRateLimits { + g.ScopeRateLimitCount[scope]++ + } + } } displayGroupID := int64(0) @@ -140,6 +158,9 @@ func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFi item.RateLimitRemainingSec = &remainingSec } } + if len(scopeRateLimits) > 0 { + item.ScopeRateLimits = scopeRateLimits + } if isOverloaded && acc.OverloadUntil != nil { item.OverloadUntil = acc.OverloadUntil remainingSec := int64(time.Until(*acc.OverloadUntil).Seconds()) diff --git a/backend/internal/service/ops_realtime_models.go b/backend/internal/service/ops_realtime_models.go index f7514a24..c7e5715b 100644 --- a/backend/internal/service/ops_realtime_models.go +++ b/backend/internal/service/ops_realtime_models.go @@ -39,22 +39,24 @@ type AccountConcurrencyInfo struct { // PlatformAvailability aggregates account availability by platform. type PlatformAvailability struct { - Platform string `json:"platform"` - TotalAccounts int64 `json:"total_accounts"` - AvailableCount int64 `json:"available_count"` - RateLimitCount int64 `json:"rate_limit_count"` - ErrorCount int64 `json:"error_count"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ScopeRateLimitCount map[string]int64 `json:"scope_rate_limit_count,omitempty"` + ErrorCount int64 `json:"error_count"` } // GroupAvailability aggregates account availability by group. type GroupAvailability struct { - GroupID int64 `json:"group_id"` - GroupName string `json:"group_name"` - Platform string `json:"platform"` - TotalAccounts int64 `json:"total_accounts"` - AvailableCount int64 `json:"available_count"` - RateLimitCount int64 `json:"rate_limit_count"` - ErrorCount int64 `json:"error_count"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ScopeRateLimitCount map[string]int64 `json:"scope_rate_limit_count,omitempty"` + ErrorCount int64 `json:"error_count"` } // AccountAvailability represents current availability for a single account. 
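Note on the ScopeRateLimitCount maps added above: they are allocated lazily and stay nil until the first rate-limited scope is seen, so `omitempty` drops them from the JSON for platforms and groups with no scope-level limits. A compact sketch of the counting pattern used in GetAccountAvailabilityStats; the function name and input shape are assumptions for illustration:

```go
package main

import "fmt"

// countLimitedScopes mirrors the aggregation in GetAccountAvailabilityStats:
// each account contributes its limited scopes (scope -> remaining seconds),
// and the platform/group row counts accounts per scope. Lazy allocation keeps
// the map nil when nothing is limited, so omitempty omits the field entirely.
func countLimitedScopes(perAccount []map[string]int64) map[string]int64 {
	var counts map[string]int64
	for _, scopes := range perAccount {
		if len(scopes) == 0 {
			continue
		}
		if counts == nil {
			counts = make(map[string]int64)
		}
		for scope := range scopes {
			counts[scope]++
		}
	}
	return counts
}

func main() {
	perAccount := []map[string]int64{
		{"claude": 120, "gemini_image": 30}, // remaining seconds per limited scope
		{"gemini_image": 45},
		nil, // account with no scope-level limits
	}
	fmt.Println(countLimitedScopes(perAccount)) // map[claude:1 gemini_image:2]
	fmt.Println(countLimitedScopes(nil))        // map[] (nil map, field omitted in JSON)
}
```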
@@ -72,10 +74,11 @@ type AccountAvailability struct { IsOverloaded bool `json:"is_overloaded"` HasError bool `json:"has_error"` - RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` - RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` - OverloadUntil *time.Time `json:"overload_until"` - OverloadRemainingSec *int64 `json:"overload_remaining_sec"` - ErrorMessage string `json:"error_message"` - TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` + RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` + RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` + ScopeRateLimits map[string]int64 `json:"scope_rate_limits,omitempty"` + OverloadUntil *time.Time `json:"overload_until"` + OverloadRemainingSec *int64 `json:"overload_remaining_sec"` + ErrorMessage string `json:"error_message"` + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` } diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 4214450f..11a98e46 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -355,6 +355,7 @@ export interface PlatformAvailability { total_accounts: number available_count: number rate_limit_count: number + scope_rate_limit_count?: Record error_count: number } @@ -365,6 +366,7 @@ export interface GroupAvailability { total_accounts: number available_count: number rate_limit_count: number + scope_rate_limit_count?: Record error_count: number } @@ -379,6 +381,7 @@ export interface AccountAvailability { is_rate_limited: boolean rate_limit_reset_at?: string rate_limit_remaining_sec?: number + scope_rate_limits?: Record is_overloaded: boolean overload_until?: string overload_remaining_sec?: number diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 2a4ff1c6..a3c4c2da 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -2617,6 +2617,7 @@ export default { empty: 'No data', queued: 'Queue {count}', rateLimited: 'Rate-limited {count}', + scopeRateLimitedTooltip: '{scope} rate-limited ({count} accounts)', errorAccounts: 'Errors {count}', loadFailed: 'Failed to load concurrency data' }, diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index d8c13cfc..dec88217 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -2771,6 +2771,7 @@ export default { empty: '暂无数据', queued: '队列 {count}', rateLimited: '限流 {count}', + scopeRateLimitedTooltip: '{scope} 限流中 ({count} 个账号)', errorAccounts: '异常 {count}', loadFailed: '加载并发数据失败' }, diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue index acb0de1b..9c1ae1c1 100644 --- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue +++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue @@ -49,6 +49,7 @@ interface SummaryRow { total_accounts: number available_accounts: number rate_limited_accounts: number + scope_rate_limit_count?: Record error_accounts: number // 并发统计 total_concurrency: number @@ -102,6 +103,7 @@ const platformRows = computed((): SummaryRow[] => { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: safeNumber(avail.rate_limit_count), + scope_rate_limit_count: avail.scope_rate_limit_count, error_accounts: safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, @@ -141,6 +143,7 @@ const groupRows = computed((): SummaryRow[] 
=> { total_accounts: totalAccounts, available_accounts: availableAccounts, rate_limited_accounts: safeNumber(avail.rate_limit_count), + scope_rate_limit_count: avail.scope_rate_limit_count, error_accounts: safeNumber(avail.error_count), total_concurrency: totalConcurrency, used_concurrency: usedConcurrency, @@ -269,6 +272,15 @@ function formatDuration(seconds: number): string { return `${hours}h` } +function formatScopeName(scope: string): string { + const names: Record = { + claude: 'Claude', + gemini_text: 'Gemini', + gemini_image: 'Image' + } + return names[scope] || scope +} + watch( () => realtimeEnabled.value, async (enabled) => { @@ -387,6 +399,18 @@ watch( {{ t('admin.ops.concurrency.rateLimited', { count: row.rate_limited_accounts }) }} + + + Date: Tue, 27 Jan 2026 11:04:41 +0800 Subject: [PATCH 041/214] =?UTF-8?q?feat(accounts):=20=E8=B4=A6=E5=8F=B7?= =?UTF-8?q?=E5=88=97=E8=A1=A8=E6=98=BE=E7=A4=BA=20Antigravity=20scope=20?= =?UTF-8?q?=E7=BA=A7=E5=88=AB=E9=99=90=E6=B5=81=E7=8A=B6=E6=80=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 后端 DTO 新增 scope_rate_limits 字段,从 extra 提取限流信息 - 前端状态列显示 scope 级限流徽章(Claude/Gemini/Image) - 清除速率限制时同时清除账号级和 scope 级限流(已有实现) --- backend/internal/handler/dto/mappers.go | 11 +++++ backend/internal/handler/dto/types.go | 8 ++++ .../account/AccountStatusIndicator.vue | 40 +++++++++++++++++++ frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + frontend/src/types/index.ts | 3 ++ 6 files changed, 64 insertions(+) diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index f1991c30..18bbec31 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -164,6 +164,17 @@ func AccountFromServiceShallow(a *service.Account) *Account { } } + if scopeLimits := a.GetAntigravityScopeRateLimits(); len(scopeLimits) > 0 { + out.ScopeRateLimits = make(map[string]ScopeRateLimitInfo, len(scopeLimits)) + now := time.Now() + for scope, remainingSec := range scopeLimits { + out.ScopeRateLimits[scope] = ScopeRateLimitInfo{ + ResetAt: now.Add(time.Duration(remainingSec) * time.Second), + RemainingSec: remainingSec, + } + } + } + return out } diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index b425523b..97bd2eca 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -2,6 +2,11 @@ package dto import "time" +type ScopeRateLimitInfo struct { + ResetAt time.Time `json:"reset_at"` + RemainingSec int64 `json:"remaining_sec"` +} + type User struct { ID int64 `json:"id"` Email string `json:"email"` @@ -97,6 +102,9 @@ type Account struct { RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` OverloadUntil *time.Time `json:"overload_until"` + // Antigravity scope 级限流状态(从 extra 提取) + ScopeRateLimits map[string]ScopeRateLimitInfo `json:"scope_rate_limits,omitempty"` + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until"` TempUnschedulableReason string `json:"temp_unschedulable_reason"` diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 7dae33bb..3c8aba97 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -62,6 +62,27 @@
+ + +
{ return new Date(props.account.rate_limit_reset_at) > new Date() }) +// Computed: active scope rate limits (Antigravity) +const activeScopeRateLimits = computed(() => { + const scopeLimits = props.account.scope_rate_limits + if (!scopeLimits) return [] + const now = new Date() + return Object.entries(scopeLimits) + .filter(([, info]) => new Date(info.reset_at) > now) + .map(([scope, info]) => ({ scope, reset_at: info.reset_at })) +}) + +const formatScopeName = (scope: string): string => { + const names: Record = { + claude: 'Claude', + gemini_text: 'Gemini', + gemini_image: 'Image' + } + return names[scope] || scope +} + // Computed: is overloaded (529) const isOverloaded = computed(() => { if (!props.account.overload_until) return false diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index a3c4c2da..0e4effc9 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1081,6 +1081,7 @@ export default { limited: 'Limited', tempUnschedulable: 'Temp Unschedulable', rateLimitedUntil: 'Rate limited until {time}', + scopeRateLimitedUntil: '{scope} rate limited until {time}', overloadedUntil: 'Overloaded until {time}', viewTempUnschedDetails: 'View temp unschedulable details' }, diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index dec88217..ae6b2abf 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1203,6 +1203,7 @@ export default { limited: '限流', tempUnschedulable: '临时不可调度', rateLimitedUntil: '限流中,重置时间:{time}', + scopeRateLimitedUntil: '{scope} 限流中,重置时间:{time}', overloadedUntil: '负载过重,重置时间:{time}', viewTempUnschedDetails: '查看临时不可调度详情' }, diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index fcd3748f..17377c98 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -470,6 +470,9 @@ export interface Account { temp_unschedulable_until: string | null temp_unschedulable_reason: string | null + // Antigravity scope 级限流状态 + scope_rate_limits?: Record + // Session window fields (5-hour window) session_window_start: string | null session_window_end: string | null From 877c17251d29554f1edd266aa122141d710d7430 Mon Sep 17 00:00:00 2001 From: song Date: Tue, 27 Jan 2026 13:09:56 +0800 Subject: [PATCH 042/214] =?UTF-8?q?feat(group):=20=E6=B7=BB=E5=8A=A0=20MCP?= =?UTF-8?q?=20XML=20=E6=B3=A8=E5=85=A5=E5=BC=80=E5=85=B3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Group 新增 mcp_xml_inject 字段,控制 Antigravity 平台的 MCP XML 协议注入 - 默认启用,可在分组设置中关闭 - 修复 GetByKeyForAuth 遗漏查询 mcp_xml_inject 字段导致认证缓存值始终为 false 的问题 --- backend/ent/group.go | 13 ++- backend/ent/group/group.go | 10 ++ backend/ent/group/where.go | 15 +++ backend/ent/group_create.go | 65 ++++++++++++ backend/ent/group_update.go | 34 +++++++ backend/ent/migrate/schema.go | 1 + backend/ent/mutation.go | 56 ++++++++++- backend/ent/runtime/runtime.go | 4 + backend/ent/schema/group.go | 5 + .../internal/handler/admin/group_handler.go | 4 + backend/internal/handler/dto/mappers.go | 1 + backend/internal/handler/dto/types.go | 3 + .../pkg/antigravity/request_transformer.go | 8 +- backend/internal/repository/api_key_repo.go | 2 + backend/internal/repository/group_repo.go | 6 +- backend/internal/service/admin_service.go | 12 +++ .../service/antigravity_gateway_service.go | 5 + .../internal/service/api_key_auth_cache.go | 1 + .../service/api_key_auth_cache_impl.go | 2 + backend/internal/service/group.go | 3 + .../044_add_group_mcp_xml_inject.sql | 2 + 
frontend/src/i18n/locales/en.ts | 6 ++ frontend/src/i18n/locales/zh.ts | 6 ++ frontend/src/types/index.ts | 2 + frontend/src/views/admin/GroupsView.vue | 98 ++++++++++++++++++- 25 files changed, 355 insertions(+), 9 deletions(-) create mode 100644 backend/migrations/044_add_group_mcp_xml_inject.sql diff --git a/backend/ent/group.go b/backend/ent/group.go index f91a4079..d2b6af9f 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -62,6 +62,8 @@ type Group struct { ModelRouting map[string][]int64 `json:"model_routing,omitempty"` // 是否启用模型路由配置 ModelRoutingEnabled bool `json:"model_routing_enabled,omitempty"` + // 是否注入 MCP XML 调用协议提示词(仅 antigravity 平台) + McpXMLInject bool `json:"mcp_xml_inject,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the GroupQuery when eager-loading is set. Edges GroupEdges `json:"edges"` @@ -170,7 +172,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { switch columns[i] { case group.FieldModelRouting: values[i] = new([]byte) - case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled: + case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject: values[i] = new(sql.NullBool) case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k: values[i] = new(sql.NullFloat64) @@ -345,6 +347,12 @@ func (_m *Group) assignValues(columns []string, values []any) error { } else if value.Valid { _m.ModelRoutingEnabled = value.Bool } + case group.FieldMcpXMLInject: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field mcp_xml_inject", values[i]) + } else if value.Valid { + _m.McpXMLInject = value.Bool + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -506,6 +514,9 @@ func (_m *Group) String() string { builder.WriteString(", ") builder.WriteString("model_routing_enabled=") builder.WriteString(fmt.Sprintf("%v", _m.ModelRoutingEnabled)) + builder.WriteString(", ") + builder.WriteString("mcp_xml_inject=") + builder.WriteString(fmt.Sprintf("%v", _m.McpXMLInject)) builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index b63827d3..aa9ff2ab 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -59,6 +59,8 @@ const ( FieldModelRouting = "model_routing" // FieldModelRoutingEnabled holds the string denoting the model_routing_enabled field in the database. FieldModelRoutingEnabled = "model_routing_enabled" + // FieldMcpXMLInject holds the string denoting the mcp_xml_inject field in the database. + FieldMcpXMLInject = "mcp_xml_inject" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. @@ -156,6 +158,7 @@ var Columns = []string{ FieldFallbackGroupIDOnInvalidRequest, FieldModelRouting, FieldModelRoutingEnabled, + FieldMcpXMLInject, } var ( @@ -215,6 +218,8 @@ var ( DefaultClaudeCodeOnly bool // DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field. DefaultModelRoutingEnabled bool + // DefaultMcpXMLInject holds the default value on creation for the "mcp_xml_inject" field. + DefaultMcpXMLInject bool ) // OrderOption defines the ordering options for the Group queries. 
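Most of the generated ent plumbing around this field follows one pattern: the default declared in schema/group.go is copied into group.DefaultMcpXMLInject at init time and applied whenever a create mutation leaves the field unset. A condensed, self-contained sketch of that flow (type and variable names are simplified stand-ins; the real generated code is in group_create.go and runtime/runtime.go in this patch):

package main

import "fmt"

// Condensed sketch of ent's default-value flow for the new field.
var defaultMcpXMLInject = true // from schema: field.Bool("mcp_xml_inject").Default(true)

type groupCreate struct {
	mcpXMLInject *bool // nil until SetMcpXMLInject is called or defaults() runs
}

func (c *groupCreate) SetMcpXMLInject(v bool) *groupCreate {
	c.mcpXMLInject = &v
	return c
}

func (c *groupCreate) defaults() {
	if c.mcpXMLInject == nil {
		v := defaultMcpXMLInject
		c.mcpXMLInject = &v
	}
}

func main() {
	c := &groupCreate{}
	c.defaults()
	fmt.Println(*c.mcpXMLInject) // true — unset fields pick up the schema default

	off := (&groupCreate{}).SetMcpXMLInject(false)
	off.defaults()
	fmt.Println(*off.mcpXMLInject) // false — explicit values win over the default
}

Explicit Set calls always win over the default, which is what lets the admin API turn the flag off per group.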
@@ -330,6 +335,11 @@ func ByModelRoutingEnabled(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldModelRoutingEnabled, opts...).ToFunc() } +// ByMcpXMLInject orders the results by the mcp_xml_inject field. +func ByMcpXMLInject(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMcpXMLInject, opts...).ToFunc() +} + // ByAPIKeysCount orders the results by api_keys count. func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index 02cbb3d5..b6fa2c33 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -160,6 +160,11 @@ func ModelRoutingEnabled(v bool) predicate.Group { return predicate.Group(sql.FieldEQ(FieldModelRoutingEnabled, v)) } +// McpXMLInject applies equality check predicate on the "mcp_xml_inject" field. It's identical to McpXMLInjectEQ. +func McpXMLInject(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMcpXMLInject, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Group { return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) @@ -1145,6 +1150,16 @@ func ModelRoutingEnabledNEQ(v bool) predicate.Group { return predicate.Group(sql.FieldNEQ(FieldModelRoutingEnabled, v)) } +// McpXMLInjectEQ applies the EQ predicate on the "mcp_xml_inject" field. +func McpXMLInjectEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMcpXMLInject, v)) +} + +// McpXMLInjectNEQ applies the NEQ predicate on the "mcp_xml_inject" field. +func McpXMLInjectNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldMcpXMLInject, v)) +} + // HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. func HasAPIKeys() predicate.Group { return predicate.Group(func(s *sql.Selector) { diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index b08894da..b1ccc8e3 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -320,6 +320,20 @@ func (_c *GroupCreate) SetNillableModelRoutingEnabled(v *bool) *GroupCreate { return _c } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (_c *GroupCreate) SetMcpXMLInject(v bool) *GroupCreate { + _c.mutation.SetMcpXMLInject(v) + return _c +} + +// SetNillableMcpXMLInject sets the "mcp_xml_inject" field if the given value is not nil. +func (_c *GroupCreate) SetNillableMcpXMLInject(v *bool) *GroupCreate { + if v != nil { + _c.SetMcpXMLInject(*v) + } + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { _c.mutation.AddAPIKeyIDs(ids...) 
@@ -493,6 +507,10 @@ func (_c *GroupCreate) defaults() error { v := group.DefaultModelRoutingEnabled _c.mutation.SetModelRoutingEnabled(v) } + if _, ok := _c.mutation.McpXMLInject(); !ok { + v := group.DefaultMcpXMLInject + _c.mutation.SetMcpXMLInject(v) + } return nil } @@ -551,6 +569,9 @@ func (_c *GroupCreate) check() error { if _, ok := _c.mutation.ModelRoutingEnabled(); !ok { return &ValidationError{Name: "model_routing_enabled", err: errors.New(`ent: missing required field "Group.model_routing_enabled"`)} } + if _, ok := _c.mutation.McpXMLInject(); !ok { + return &ValidationError{Name: "mcp_xml_inject", err: errors.New(`ent: missing required field "Group.mcp_xml_inject"`)} + } return nil } @@ -666,6 +687,10 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) _node.ModelRoutingEnabled = value } + if value, ok := _c.mutation.McpXMLInject(); ok { + _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) + _node.McpXMLInject = value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1200,6 +1225,18 @@ func (u *GroupUpsert) UpdateModelRoutingEnabled() *GroupUpsert { return u } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (u *GroupUpsert) SetMcpXMLInject(v bool) *GroupUpsert { + u.Set(group.FieldMcpXMLInject, v) + return u +} + +// UpdateMcpXMLInject sets the "mcp_xml_inject" field to the value that was provided on create. +func (u *GroupUpsert) UpdateMcpXMLInject() *GroupUpsert { + u.SetExcluded(group.FieldMcpXMLInject) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -1686,6 +1723,20 @@ func (u *GroupUpsertOne) UpdateModelRoutingEnabled() *GroupUpsertOne { }) } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (u *GroupUpsertOne) SetMcpXMLInject(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetMcpXMLInject(v) + }) +} + +// UpdateMcpXMLInject sets the "mcp_xml_inject" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateMcpXMLInject() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateMcpXMLInject() + }) +} + // Exec executes the query. func (u *GroupUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -2338,6 +2389,20 @@ func (u *GroupUpsertBulk) UpdateModelRoutingEnabled() *GroupUpsertBulk { }) } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (u *GroupUpsertBulk) SetMcpXMLInject(v bool) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetMcpXMLInject(v) + }) +} + +// UpdateMcpXMLInject sets the "mcp_xml_inject" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateMcpXMLInject() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateMcpXMLInject() + }) +} + // Exec executes the query. func (u *GroupUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index ce8f3748..332ae52a 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -448,6 +448,20 @@ func (_u *GroupUpdate) SetNillableModelRoutingEnabled(v *bool) *GroupUpdate { return _u } +// SetMcpXMLInject sets the "mcp_xml_inject" field. 
+func (_u *GroupUpdate) SetMcpXMLInject(v bool) *GroupUpdate { + _u.mutation.SetMcpXMLInject(v) + return _u +} + +// SetNillableMcpXMLInject sets the "mcp_xml_inject" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableMcpXMLInject(v *bool) *GroupUpdate { + if v != nil { + _u.SetMcpXMLInject(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { _u.mutation.AddAPIKeyIDs(ids...) @@ -874,6 +888,9 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.ModelRoutingEnabled(); ok { _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) } + if value, ok := _u.mutation.McpXMLInject(); ok { + _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1602,6 +1619,20 @@ func (_u *GroupUpdateOne) SetNillableModelRoutingEnabled(v *bool) *GroupUpdateOn return _u } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (_u *GroupUpdateOne) SetMcpXMLInject(v bool) *GroupUpdateOne { + _u.mutation.SetMcpXMLInject(v) + return _u +} + +// SetNillableMcpXMLInject sets the "mcp_xml_inject" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableMcpXMLInject(v *bool) *GroupUpdateOne { + if v != nil { + _u.SetMcpXMLInject(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) @@ -2058,6 +2089,9 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) if value, ok := _u.mutation.ModelRoutingEnabled(); ok { _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) } + if value, ok := _u.mutation.McpXMLInject(); ok { + _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 5624c05b..3b83061e 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -229,6 +229,7 @@ var ( {Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true}, {Name: "model_routing", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, {Name: "model_routing_enabled", Type: field.TypeBool, Default: false}, + {Name: "mcp_xml_inject", Type: field.TypeBool, Default: true}, } // GroupsTable holds the schema information for the "groups" table. GroupsTable = &schema.Table{ diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 69801b9f..98195985 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -3868,6 +3868,7 @@ type GroupMutation struct { addfallback_group_id_on_invalid_request *int64 model_routing *map[string][]int64 model_routing_enabled *bool + mcp_xml_inject *bool clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -5133,6 +5134,42 @@ func (m *GroupMutation) ResetModelRoutingEnabled() { m.model_routing_enabled = nil } +// SetMcpXMLInject sets the "mcp_xml_inject" field. +func (m *GroupMutation) SetMcpXMLInject(b bool) { + m.mcp_xml_inject = &b +} + +// McpXMLInject returns the value of the "mcp_xml_inject" field in the mutation. 
+func (m *GroupMutation) McpXMLInject() (r bool, exists bool) { + v := m.mcp_xml_inject + if v == nil { + return + } + return *v, true +} + +// OldMcpXMLInject returns the old "mcp_xml_inject" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldMcpXMLInject(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMcpXMLInject is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMcpXMLInject requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMcpXMLInject: %w", err) + } + return oldValue.McpXMLInject, nil +} + +// ResetMcpXMLInject resets all changes to the "mcp_xml_inject" field. +func (m *GroupMutation) ResetMcpXMLInject() { + m.mcp_xml_inject = nil +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -5491,7 +5528,7 @@ func (m *GroupMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *GroupMutation) Fields() []string { - fields := make([]string, 0, 22) + fields := make([]string, 0, 23) if m.created_at != nil { fields = append(fields, group.FieldCreatedAt) } @@ -5558,6 +5595,9 @@ func (m *GroupMutation) Fields() []string { if m.model_routing_enabled != nil { fields = append(fields, group.FieldModelRoutingEnabled) } + if m.mcp_xml_inject != nil { + fields = append(fields, group.FieldMcpXMLInject) + } return fields } @@ -5610,6 +5650,8 @@ func (m *GroupMutation) Field(name string) (ent.Value, bool) { return m.ModelRouting() case group.FieldModelRoutingEnabled: return m.ModelRoutingEnabled() + case group.FieldMcpXMLInject: + return m.McpXMLInject() } return nil, false } @@ -5663,6 +5705,8 @@ func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldModelRouting(ctx) case group.FieldModelRoutingEnabled: return m.OldModelRoutingEnabled(ctx) + case group.FieldMcpXMLInject: + return m.OldMcpXMLInject(ctx) } return nil, fmt.Errorf("unknown Group field %s", name) } @@ -5826,6 +5870,13 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { } m.SetModelRoutingEnabled(v) return nil + case group.FieldMcpXMLInject: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMcpXMLInject(v) + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -6133,6 +6184,9 @@ func (m *GroupMutation) ResetField(name string) error { case group.FieldModelRoutingEnabled: m.ResetModelRoutingEnabled() return nil + case group.FieldMcpXMLInject: + m.ResetMcpXMLInject() + return nil } return fmt.Errorf("unknown Group field %s", name) } diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 3ddb206d..e7f33598 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -284,6 +284,10 @@ func init() { groupDescModelRoutingEnabled := groupFields[18].Descriptor() // group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field. 
group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool) + // groupDescMcpXMLInject is the schema descriptor for mcp_xml_inject field. + groupDescMcpXMLInject := groupFields[19].Descriptor() + // group.DefaultMcpXMLInject holds the default value on creation for the mcp_xml_inject field. + group.DefaultMcpXMLInject = groupDescMcpXMLInject.Default.(bool) promocodeFields := schema.PromoCode{}.Fields() _ = promocodeFields // promocodeDescCode is the schema descriptor for code field. diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 51cae1a6..58583752 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -110,6 +110,11 @@ func (Group) Fields() []ent.Field { field.Bool("model_routing_enabled"). Default(false). Comment("是否启用模型路由配置"), + + // MCP XML 协议注入开关 (added by migration 042) + field.Bool("mcp_xml_inject"). + Default(true). + Comment("是否注入 MCP XML 调用协议提示词(仅 antigravity 平台)"), } } diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index 8229a780..1df5af8c 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -44,6 +44,7 @@ type CreateGroupRequest struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` + MCPXMLInject *bool `json:"mcp_xml_inject"` } // UpdateGroupRequest represents update group request @@ -68,6 +69,7 @@ type UpdateGroupRequest struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled *bool `json:"model_routing_enabled"` + MCPXMLInject *bool `json:"mcp_xml_inject"` } // List handles listing all groups with pagination @@ -174,6 +176,7 @@ func (h *GroupHandler) Create(c *gin.Context) { FallbackGroupIDOnInvalidRequest: req.FallbackGroupIDOnInvalidRequest, ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, + MCPXMLInject: req.MCPXMLInject, }) if err != nil { response.ErrorFrom(c, err) @@ -217,6 +220,7 @@ func (h *GroupHandler) Update(c *gin.Context) { FallbackGroupIDOnInvalidRequest: req.FallbackGroupIDOnInvalidRequest, ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, + MCPXMLInject: req.MCPXMLInject, }) if err != nil { response.ErrorFrom(c, err) diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index 18bbec31..e8420336 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -92,6 +92,7 @@ func GroupFromServiceShallow(g *service.Group) *Group { FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, ModelRouting: g.ModelRouting, ModelRoutingEnabled: g.ModelRoutingEnabled, + MCPXMLInject: g.MCPXMLInject, CreatedAt: g.CreatedAt, UpdatedAt: g.UpdatedAt, AccountCount: g.AccountCount, diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 97bd2eca..abb9494a 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -69,6 +69,9 @@ type Group struct { ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` + // MCP XML 协议注入(仅 antigravity 平台使用) + MCPXMLInject bool `json:"mcp_xml_inject"` + CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` diff --git a/backend/internal/pkg/antigravity/request_transformer.go 
b/backend/internal/pkg/antigravity/request_transformer.go index 80063cb8..720e6f6a 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -44,11 +44,13 @@ type TransformOptions struct { // IdentityPatch 可选:自定义注入到 systemInstruction 开头的身份防护提示词; // 为空时使用默认模板(包含 [IDENTITY_PATCH] 及 SYSTEM_PROMPT_BEGIN 标记)。 IdentityPatch string + EnableMCPXML bool } func DefaultTransformOptions() TransformOptions { return TransformOptions{ EnableIdentityPatch: true, + EnableMCPXML: true, } } @@ -257,8 +259,8 @@ func buildSystemInstruction(system json.RawMessage, modelName string, opts Trans // 添加用户的 system prompt parts = append(parts, userSystemParts...) - // 检测是否有 MCP 工具,如有则注入 XML 调用协议 - if hasMCPTools(tools) { + // 检测是否有 MCP 工具,如有且启用了 MCP XML 注入则注入 XML 调用协议 + if opts.EnableMCPXML && hasMCPTools(tools) { parts = append(parts, GeminiPart{Text: mcpXMLProtocol}) } @@ -491,7 +493,7 @@ func parseToolResultContent(content json.RawMessage, isError bool) string { // buildGenerationConfig 构建 generationConfig const ( - defaultMaxOutputTokens = 64000 + defaultMaxOutputTokens = 64000 maxOutputTokensUpperBound = 65000 maxOutputTokensClaude = 64000 ) diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index 9938a36d..e9af365c 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -139,6 +139,7 @@ func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*se group.FieldFallbackGroupIDOnInvalidRequest, group.FieldModelRoutingEnabled, group.FieldModelRouting, + group.FieldMcpXMLInject, ) }). Only(ctx) @@ -428,6 +429,7 @@ func groupEntityToService(g *dbent.Group) *service.Group { FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, ModelRouting: g.ModelRouting, ModelRoutingEnabled: g.ModelRoutingEnabled, + MCPXMLInject: g.McpXMLInject, CreatedAt: g.CreatedAt, UpdatedAt: g.UpdatedAt, } diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index f207f479..116e45a3 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -51,7 +51,8 @@ func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) er SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). SetNillableFallbackGroupID(groupIn.FallbackGroupID). SetNillableFallbackGroupIDOnInvalidRequest(groupIn.FallbackGroupIDOnInvalidRequest). - SetModelRoutingEnabled(groupIn.ModelRoutingEnabled) + SetModelRoutingEnabled(groupIn.ModelRoutingEnabled). + SetMcpXMLInject(groupIn.MCPXMLInject) // 设置模型路由配置 if groupIn.ModelRouting != nil { @@ -109,7 +110,8 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er SetNillableImagePrice4k(groupIn.ImagePrice4K). SetDefaultValidityDays(groupIn.DefaultValidityDays). SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). - SetModelRoutingEnabled(groupIn.ModelRoutingEnabled) + SetModelRoutingEnabled(groupIn.ModelRoutingEnabled). 
+ SetMcpXMLInject(groupIn.MCPXMLInject) // 处理 FallbackGroupID:nil 时清除,否则设置 if groupIn.FallbackGroupID != nil { diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 12f01810..392c7aa2 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -113,6 +113,7 @@ type CreateGroupInput struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled bool // 是否启用模型路由 + MCPXMLInject *bool } type UpdateGroupInput struct { @@ -137,6 +138,7 @@ type UpdateGroupInput struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled *bool // 是否启用模型路由 + MCPXMLInject *bool } type CreateAccountInput struct { @@ -587,6 +589,12 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn } } + // MCPXMLInject:默认为 true,仅当显式传入 false 时关闭 + mcpXMLInject := true + if input.MCPXMLInject != nil { + mcpXMLInject = *input.MCPXMLInject + } + group := &Group{ Name: input.Name, Description: input.Description, @@ -605,6 +613,7 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn FallbackGroupID: input.FallbackGroupID, FallbackGroupIDOnInvalidRequest: fallbackOnInvalidRequest, ModelRouting: input.ModelRouting, + MCPXMLInject: mcpXMLInject, } if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err @@ -785,6 +794,9 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd if input.ModelRoutingEnabled != nil { group.ModelRoutingEnabled = *input.ModelRoutingEnabled } + if input.MCPXMLInject != nil { + group.MCPXMLInject = *input.MCPXMLInject + } if err := s.groupRepo.Update(ctx, group); err != nil { return nil, err diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index d3c15418..dbdfd374 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -19,6 +19,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/gin-gonic/gin" "github.com/google/uuid" ) @@ -552,6 +553,10 @@ func (s *AntigravityGatewayService) getClaudeTransformOptions(ctx context.Contex } opts.EnableIdentityPatch = s.settingService.IsIdentityPatchEnabled(ctx) opts.IdentityPatch = s.settingService.GetIdentityPatchPrompt(ctx) + + if group, ok := ctx.Value(ctxkey.Group).(*Group); ok && group != nil { + opts.EnableMCPXML = group.MCPXMLInject + } return opts } diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go index 4b51fbbb..5cb2fbfb 100644 --- a/backend/internal/service/api_key_auth_cache.go +++ b/backend/internal/service/api_key_auth_cache.go @@ -43,6 +43,7 @@ type APIKeyAuthGroupSnapshot struct { // Only anthropic groups use these fields; others may leave them empty. 
ModelRouting map[string][]int64 `json:"model_routing,omitempty"` ModelRoutingEnabled bool `json:"model_routing_enabled"` + MCPXMLInject bool `json:"mcp_xml_inject"` } // APIKeyAuthCacheEntry 缓存条目,支持负缓存 diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go index 8b74e7aa..b9f0ef36 100644 --- a/backend/internal/service/api_key_auth_cache_impl.go +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -224,6 +224,7 @@ func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { FallbackGroupIDOnInvalidRequest: apiKey.Group.FallbackGroupIDOnInvalidRequest, ModelRouting: apiKey.Group.ModelRouting, ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, + MCPXMLInject: apiKey.Group.MCPXMLInject, } } return snapshot @@ -269,6 +270,7 @@ func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapsho FallbackGroupIDOnInvalidRequest: snapshot.Group.FallbackGroupIDOnInvalidRequest, ModelRouting: snapshot.Group.ModelRouting, ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, + MCPXMLInject: snapshot.Group.MCPXMLInject, } } return apiKey diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go index 9140b6d9..7f1825c6 100644 --- a/backend/internal/service/group.go +++ b/backend/internal/service/group.go @@ -38,6 +38,9 @@ type Group struct { ModelRouting map[string][]int64 ModelRoutingEnabled bool + // MCP XML 协议注入开关(仅 antigravity 平台使用) + MCPXMLInject bool + CreatedAt time.Time UpdatedAt time.Time diff --git a/backend/migrations/044_add_group_mcp_xml_inject.sql b/backend/migrations/044_add_group_mcp_xml_inject.sql new file mode 100644 index 00000000..7db71dd8 --- /dev/null +++ b/backend/migrations/044_add_group_mcp_xml_inject.sql @@ -0,0 +1,2 @@ +-- Add mcp_xml_inject field to groups table (for antigravity platform) +ALTER TABLE groups ADD COLUMN mcp_xml_inject BOOLEAN NOT NULL DEFAULT true; diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 0e4effc9..84d1e641 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -943,6 +943,12 @@ export default { noRulesHint: 'Add routing rules to route specific model requests to designated accounts', searchAccountPlaceholder: 'Search accounts...', accountsHint: 'Select accounts to prioritize for this model pattern' + }, + mcpXml: { + title: 'MCP XML Protocol Injection', + tooltip: 'When enabled, if the request contains MCP tools, an XML format call protocol prompt will be injected into the system prompt. 
Disable this to avoid interference with certain clients.', + enabled: 'Enabled', + disabled: 'Disabled' } }, diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index ae6b2abf..468ee5ad 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1019,6 +1019,12 @@ export default { noRulesHint: '添加路由规则以将特定模型请求优先路由到指定账号', searchAccountPlaceholder: '搜索账号...', accountsHint: '选择此模型模式优先使用的账号' + }, + mcpXml: { + title: 'MCP XML 协议注入', + tooltip: '启用后,当请求包含 MCP 工具时,会在 system prompt 中注入 XML 格式调用协议提示词。关闭此选项可避免对某些客户端造成干扰。', + enabled: '已启用', + disabled: '已禁用' } }, diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 17377c98..1e23a85f 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -273,6 +273,8 @@ export interface Group { // 模型路由配置(仅 anthropic 平台使用) model_routing: Record | null model_routing_enabled: boolean + // MCP XML 协议注入(仅 antigravity 平台使用) + mcp_xml_inject: boolean account_count?: number created_at: string updated_at: string diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index f3a407d7..ffeb960e 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -404,6 +404,51 @@
+ +
+
+ +
+ +
+
+

+ {{ t('admin.groups.mcpXml.tooltip') }} +

+
+
+
+
+
+
+ + + {{ createForm.mcp_xml_inject ? t('admin.groups.mcpXml.enabled') : t('admin.groups.mcpXml.disabled') }} + +
+
+
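The toggle above binds createForm.mcp_xml_inject, which reaches the backend as an optional *bool on CreateGroupRequest. A minimal sketch of the create-side resolution rule (matching the admin_service.go hunk earlier in this patch: default on, explicit false turns it off):

package main

import "fmt"

// resolveMCPXMLInject mirrors the create-path rule in CreateGroup:
// default to true, disable only when the client explicitly sends false.
func resolveMCPXMLInject(in *bool) bool {
	if in != nil {
		return *in
	}
	return true
}

func main() {
	off := false
	fmt.Println(resolveMCPXMLInject(nil))  // true  (field omitted from the request)
	fmt.Println(resolveMCPXMLInject(&off)) // false (explicitly disabled)
}

On update the same pointer convention applies: a nil field leaves the stored value untouched rather than resetting it.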
@@ -862,6 +907,51 @@
+ +
+
+ +
+ +
+
+

+ {{ t('admin.groups.mcpXml.tooltip') }} +

+
+
+
+
+
+
+ + + {{ editForm.mcp_xml_inject ? t('admin.groups.mcpXml.enabled') : t('admin.groups.mcpXml.disabled') }} + +
+
+
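At request time the group flag travels through the auth cache snapshot into getClaudeTransformOptions, and the transformer only injects the protocol prompt when both the switch is on and the request actually carries MCP tools. A simplified, self-contained sketch of that gate (the real check lives in buildSystemInstruction; the mcp_ name-prefix test below is an assumption for illustration, not the actual detection logic):

package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins; the real TransformOptions and tool types live in pkg/antigravity.
type transformOptions struct{ EnableMCPXML bool }
type tool struct{ Name string }

// hasMCPTools approximates MCP-tool detection by name prefix (assumption for this sketch).
func hasMCPTools(tools []tool) bool {
	for _, t := range tools {
		if strings.HasPrefix(t.Name, "mcp_") {
			return true
		}
	}
	return false
}

func main() {
	opts := transformOptions{EnableMCPXML: false} // group.mcp_xml_inject = false
	tools := []tool{{Name: "mcp_search"}}
	if opts.EnableMCPXML && hasMCPTools(tools) {
		fmt.Println("append mcpXMLProtocol to systemInstruction")
	} else {
		fmt.Println("skip MCP XML protocol prompt") // taken here: the group switch is off
	}
}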
@@ -1311,7 +1401,8 @@ const createForm = reactive({ fallback_group_id: null as number | null, fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 - model_routing_enabled: false + model_routing_enabled: false, + mcp_xml_inject: true }) // 简单账号类型(用于模型路由选择) @@ -1483,7 +1574,8 @@ const editForm = reactive({ fallback_group_id: null as number | null, fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 - model_routing_enabled: false + model_routing_enabled: false, + mcp_xml_inject: true }) // 根据分组类型返回不同的删除确认消息 @@ -1566,6 +1658,7 @@ const closeCreateModal = () => { createForm.claude_code_only = false createForm.fallback_group_id = null createForm.fallback_group_id_on_invalid_request = null + createForm.mcp_xml_inject = true createModelRoutingRules.value = [] } @@ -1617,6 +1710,7 @@ const handleEdit = async (group: Group) => { editForm.fallback_group_id = group.fallback_group_id editForm.fallback_group_id_on_invalid_request = group.fallback_group_id_on_invalid_request editForm.model_routing_enabled = group.model_routing_enabled || false + editForm.mcp_xml_inject = group.mcp_xml_inject ?? true // 加载模型路由规则(异步加载账号名称) editModelRoutingRules.value = await convertApiFormatToRoutingRules(group.model_routing) showEditModal.value = true From f761afb1efb568a81f9500979c6df50de0fe1bff Mon Sep 17 00:00:00 2001 From: song Date: Wed, 28 Jan 2026 00:01:03 +0800 Subject: [PATCH 043/214] =?UTF-8?q?antigravity:=20=E5=8C=BA=E5=88=86?= =?UTF-8?q?=E5=88=87=E6=8D=A2=E5=90=8E=E9=87=8D=E8=AF=95=E6=AC=A1=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/handler/gateway_handler.go | 16 ++++++-- .../internal/handler/gemini_v1beta_handler.go | 9 ++++- backend/internal/pkg/ctxkey/ctxkey.go | 3 ++ .../service/antigravity_gateway_service.go | 38 +++++++++++++++++-- .../antigravity_gateway_service_test.go | 25 ++++++++++++ backend/internal/service/ops_retry.go | 7 +++- 6 files changed, 87 insertions(+), 11 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index cd622a3b..fdb6411c 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -276,10 +276,14 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 转发请求 - 根据账号平台分流 var result *service.ForwardResult + requestCtx := c.Request.Context() + if switchCount > 0 { + requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount) + } if account.Platform == service.PlatformAntigravity { - result, err = h.antigravityGatewayService.ForwardGemini(c.Request.Context(), c, account, reqModel, "generateContent", reqStream, body) + result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, reqModel, "generateContent", reqStream, body) } else { - result, err = h.geminiCompatService.Forward(c.Request.Context(), c, account, body) + result, err = h.geminiCompatService.Forward(requestCtx, c, account, body) } if accountReleaseFunc != nil { accountReleaseFunc() @@ -419,10 +423,14 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 转发请求 - 根据账号平台分流 var result *service.ForwardResult + requestCtx := c.Request.Context() + if switchCount > 0 { + requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount) + } if account.Platform == service.PlatformAntigravity { - result, err = h.antigravityGatewayService.Forward(c.Request.Context(), c, account, body) + result, err = h.antigravityGatewayService.Forward(requestCtx, 
c, account, body)
 	} else {
-		result, err = h.gatewayService.Forward(c.Request.Context(), c, account, parsedReq)
+		result, err = h.gatewayService.Forward(requestCtx, c, account, parsedReq)
 	}
 	if accountReleaseFunc != nil {
 		accountReleaseFunc()
diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go
index c7646b38..1946aeb2 100644
--- a/backend/internal/handler/gemini_v1beta_handler.go
+++ b/backend/internal/handler/gemini_v1beta_handler.go
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
 	"github.com/Wei-Shaw/sub2api/internal/pkg/gemini"
 	"github.com/Wei-Shaw/sub2api/internal/pkg/googleapi"
 	"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
@@ -288,10 +289,14 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
 
 	// 5) forward (根据平台分流)
 	var result *service.ForwardResult
+	requestCtx := c.Request.Context()
+	if switchCount > 0 {
+		requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, switchCount)
+	}
 	if account.Platform == service.PlatformAntigravity {
-		result, err = h.antigravityGatewayService.ForwardGemini(c.Request.Context(), c, account, modelName, action, stream, body)
+		result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, modelName, action, stream, body)
 	} else {
-		result, err = h.geminiCompatService.ForwardNative(c.Request.Context(), c, account, modelName, action, stream, body)
+		result, err = h.geminiCompatService.ForwardNative(requestCtx, c, account, modelName, action, stream, body)
 	}
 	if accountReleaseFunc != nil {
 		accountReleaseFunc()
diff --git a/backend/internal/pkg/ctxkey/ctxkey.go b/backend/internal/pkg/ctxkey/ctxkey.go
index 27bb5ac5..fd7512f7 100644
--- a/backend/internal/pkg/ctxkey/ctxkey.go
+++ b/backend/internal/pkg/ctxkey/ctxkey.go
@@ -14,6 +14,9 @@ const (
 	// RetryCount 表示当前请求在网关层的重试次数(用于 Ops 记录与排障)。
 	RetryCount Key = "ctx_retry_count"
 
+	// AccountSwitchCount 表示请求过程中发生的账号切换次数
+	AccountSwitchCount Key = "ctx_account_switch_count"
+
 	// IsClaudeCodeClient 标识当前请求是否来自 Claude Code 客户端
 	IsClaudeCodeClient Key = "ctx_is_claude_code_client"
 	// Group 认证后的分组信息,由 API Key 认证中间件设置
diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go
index dbdfd374..db988565 100644
--- a/backend/internal/service/antigravity_gateway_service.go
+++ b/backend/internal/service/antigravity_gateway_service.go
@@ -33,6 +33,7 @@ const (
 
 const (
 	antigravityMaxRetriesEnv            = "GATEWAY_ANTIGRAVITY_MAX_RETRIES"
+	antigravityMaxRetriesAfterSwitchEnv = "GATEWAY_ANTIGRAVITY_AFTER_SWITCH_MAX_RETRIES"
 	antigravityMaxRetriesClaudeEnv      = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_CLAUDE"
 	antigravityMaxRetriesGeminiTextEnv  = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_TEXT"
 	antigravityMaxRetriesGeminiImageEnv = "GATEWAY_ANTIGRAVITY_MAX_RETRIES_GEMINI_IMAGE"
@@ -745,6 +746,8 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context,
 	if antigravityUseMappedModelForBilling() && strings.TrimSpace(mappedModel) != "" {
 		billingModel = mappedModel
 	}
+	afterSwitch := antigravityHasAccountSwitch(ctx)
+	maxRetries := antigravityMaxRetriesForModel(originalModel, afterSwitch)
 
 	// 获取 access_token
 	if s.tokenProvider == nil {
@@ -793,7 +796,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context,
 		httpUpstream:   s.httpUpstream,
 		settingService: s.settingService,
 		handleError:    s.handleUpstreamError,
-		maxRetries:     
antigravityMaxRetriesForModel(originalModel), + maxRetries: maxRetries, }) if err != nil { return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") @@ -870,7 +873,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, httpUpstream: s.httpUpstream, settingService: s.settingService, handleError: s.handleUpstreamError, - maxRetries: antigravityMaxRetriesForModel(originalModel), + maxRetries: maxRetries, }) if retryErr != nil { appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ @@ -1387,6 +1390,8 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co if antigravityUseMappedModelForBilling() && strings.TrimSpace(mappedModel) != "" { billingModel = mappedModel } + afterSwitch := antigravityHasAccountSwitch(ctx) + maxRetries := antigravityMaxRetriesForModel(originalModel, afterSwitch) // 获取 access_token if s.tokenProvider == nil { @@ -1444,7 +1449,7 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co httpUpstream: s.httpUpstream, settingService: s.settingService, handleError: s.handleUpstreamError, - maxRetries: antigravityMaxRetriesForModel(originalModel), + maxRetries: maxRetries, }) if err != nil { return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") @@ -1641,6 +1646,16 @@ func antigravityUseScopeRateLimit() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func antigravityHasAccountSwitch(ctx context.Context) bool { + if ctx == nil { + return false + } + if v, ok := ctx.Value(ctxkey.AccountSwitchCount).(int); ok { + return v > 0 + } + return false +} + func antigravityMaxRetries() int { raw := strings.TrimSpace(os.Getenv(antigravityMaxRetriesEnv)) if raw == "" { @@ -1653,9 +1668,21 @@ func antigravityMaxRetries() int { return value } +func antigravityMaxRetriesAfterSwitch() int { + raw := strings.TrimSpace(os.Getenv(antigravityMaxRetriesAfterSwitchEnv)) + if raw == "" { + return antigravityMaxRetries() + } + value, err := strconv.Atoi(raw) + if err != nil || value <= 0 { + return antigravityMaxRetries() + } + return value +} + // antigravityMaxRetriesForModel 根据模型类型获取重试次数 // 优先使用模型细分配置,未设置则回退到平台级配置 -func antigravityMaxRetriesForModel(model string) int { +func antigravityMaxRetriesForModel(model string, afterSwitch bool) int { var envKey string if strings.HasPrefix(model, "claude-") { envKey = antigravityMaxRetriesClaudeEnv @@ -1672,6 +1699,9 @@ func antigravityMaxRetriesForModel(model string) int { } } } + if afterSwitch { + return antigravityMaxRetriesAfterSwitch() + } return antigravityMaxRetries() } diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go index 9c1fb415..ffdcdc73 100644 --- a/backend/internal/service/antigravity_gateway_service_test.go +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -161,3 +161,28 @@ func TestAntigravityGatewayService_Forward_PromptTooLong(t *testing.T) { require.Len(t, events, 1) require.Equal(t, "prompt_too_long", events[0].Kind) } + +func TestAntigravityMaxRetriesForModel_AfterSwitch(t *testing.T) { + t.Setenv(antigravityMaxRetriesEnv, "4") + t.Setenv(antigravityMaxRetriesAfterSwitchEnv, "7") + t.Setenv(antigravityMaxRetriesClaudeEnv, "") + t.Setenv(antigravityMaxRetriesGeminiTextEnv, "") + t.Setenv(antigravityMaxRetriesGeminiImageEnv, "") + + got := antigravityMaxRetriesForModel("claude-sonnet-4-5", false) + require.Equal(t, 4, got) + + got = 
antigravityMaxRetriesForModel("claude-sonnet-4-5", true) + require.Equal(t, 7, got) +} + +func TestAntigravityMaxRetriesForModel_AfterSwitchFallback(t *testing.T) { + t.Setenv(antigravityMaxRetriesEnv, "5") + t.Setenv(antigravityMaxRetriesAfterSwitchEnv, "") + t.Setenv(antigravityMaxRetriesClaudeEnv, "") + t.Setenv(antigravityMaxRetriesGeminiTextEnv, "") + t.Setenv(antigravityMaxRetriesGeminiImageEnv, "") + + got := antigravityMaxRetriesForModel("gemini-2.5-flash", true) + require.Equal(t, 5, got) +} diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go index 8d98e43f..ffe4c934 100644 --- a/backend/internal/service/ops_retry.go +++ b/backend/internal/service/ops_retry.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/gin-gonic/gin" "github.com/lib/pq" @@ -476,9 +477,13 @@ func (s *OpsService) executeClientRetry(ctx context.Context, reqType opsRetryReq continue } + attemptCtx := ctx + if switches > 0 { + attemptCtx = context.WithValue(attemptCtx, ctxkey.AccountSwitchCount, switches) + } exec := func() *opsRetryExecution { defer selection.ReleaseFunc() - return s.executeWithAccount(ctx, reqType, errorLog, body, account) + return s.executeWithAccount(attemptCtx, reqType, errorLog, body, account) }() if exec != nil { From 5b787334c80204ae342387e9808374308b57d59a Mon Sep 17 00:00:00 2001 From: song Date: Wed, 28 Jan 2026 11:17:39 +0800 Subject: [PATCH 044/214] =?UTF-8?q?antigravity:=20=E8=BD=AC=E5=8F=91?= =?UTF-8?q?=E4=BC=98=E5=85=88=20daily?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/pkg/antigravity/oauth.go | 62 ++++++++++++++++--- .../service/antigravity_gateway_service.go | 5 +- 2 files changed, 57 insertions(+), 10 deletions(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index ee2a6c1a..99bedb01 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -40,17 +40,48 @@ const ( // URL 可用性 TTL(不可用 URL 的恢复时间) URLAvailabilityTTL = 5 * time.Minute + + // Antigravity API 端点 + antigravityProdBaseURL = "https://cloudcode-pa.googleapis.com" + antigravityDailyBaseURL = "https://daily-cloudcode-pa.sandbox.googleapis.com" ) // BaseURLs 定义 Antigravity API 端点(与 Antigravity-Manager 保持一致) var BaseURLs = []string{ - "https://cloudcode-pa.googleapis.com", // prod (优先) - "https://daily-cloudcode-pa.sandbox.googleapis.com", // daily sandbox (备用) + antigravityProdBaseURL, // prod (优先) + antigravityDailyBaseURL, // daily sandbox (备用) } // BaseURL 默认 URL(保持向后兼容) var BaseURL = BaseURLs[0] +// ForwardBaseURLs 返回 API 转发用的 URL 顺序(daily 优先) +func ForwardBaseURLs() []string { + if len(BaseURLs) == 0 { + return nil + } + urls := append([]string(nil), BaseURLs...) 
+ dailyIndex := -1 + for i, url := range urls { + if url == antigravityDailyBaseURL { + dailyIndex = i + break + } + } + if dailyIndex <= 0 { + return urls + } + reordered := make([]string, 0, len(urls)) + reordered = append(reordered, urls[dailyIndex]) + for i, url := range urls { + if i == dailyIndex { + continue + } + reordered = append(reordered, url) + } + return reordered +} + // URLAvailability 管理 URL 可用性状态(带 TTL 自动恢复和动态优先级) type URLAvailability struct { mu sync.RWMutex @@ -100,22 +131,37 @@ func (u *URLAvailability) IsAvailable(url string) bool { // GetAvailableURLs 返回可用的 URL 列表 // 最近成功的 URL 优先,其他按默认顺序 func (u *URLAvailability) GetAvailableURLs() []string { + return u.GetAvailableURLsWithBase(BaseURLs) +} + +// GetAvailableURLsWithBase 返回可用的 URL 列表(使用自定义顺序) +// 最近成功的 URL 优先,其他按传入顺序 +func (u *URLAvailability) GetAvailableURLsWithBase(baseURLs []string) []string { u.mu.RLock() defer u.mu.RUnlock() now := time.Now() - result := make([]string, 0, len(BaseURLs)) + result := make([]string, 0, len(baseURLs)) // 如果有最近成功的 URL 且可用,放在最前面 if u.lastSuccess != "" { - expiry, exists := u.unavailable[u.lastSuccess] - if !exists || now.After(expiry) { - result = append(result, u.lastSuccess) + found := false + for _, url := range baseURLs { + if url == u.lastSuccess { + found = true + break + } + } + if found { + expiry, exists := u.unavailable[u.lastSuccess] + if !exists || now.After(expiry) { + result = append(result, u.lastSuccess) + } } } - // 添加其他可用的 URL(按默认顺序) - for _, url := range BaseURLs { + // 添加其他可用的 URL(按传入顺序) + for _, url := range baseURLs { // 跳过已添加的 lastSuccess if url == u.lastSuccess { continue diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index db988565..67c60db5 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -77,9 +77,10 @@ func (e *PromptTooLongError) Error() string { // antigravityRetryLoop 执行带 URL fallback 的重试循环 func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopResult, error) { - availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() + baseURLs := antigravity.ForwardBaseURLs() + availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLsWithBase(baseURLs) if len(availableURLs) == 0 { - availableURLs = antigravity.BaseURLs + availableURLs = baseURLs } maxRetries := p.maxRetries if maxRetries <= 0 { From 31f817d189c6db22940c8b836c50f48073dae61a Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:28:43 +0800 Subject: [PATCH 045/214] fix: add newline separation for Claude Code system prompt --- backend/internal/service/account_test_service.go | 2 +- backend/internal/service/gateway_service.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go index 46376c69..3290fe52 100644 --- a/backend/internal/service/account_test_service.go +++ b/backend/internal/service/account_test_service.go @@ -123,7 +123,7 @@ func createTestPayload(modelID string) (map[string]any, error) { "system": []map[string]any{ { "type": "text", - "text": "You are Claude Code, Anthropic's official CLI for Claude.", + "text": claudeCodeSystemPrompt, "cache_control": map[string]string{ "type": "ephemeral", }, diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b46e856e..b1507245 100644 --- 
a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -39,7 +39,9 @@ const ( claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL defaultMaxLineSize = 40 * 1024 * 1024 - claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." + // Keep a trailing blank line so that when upstream concatenates system strings, + // the injected Claude Code banner doesn't run into the next system instruction. + claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) @@ -2479,7 +2481,8 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { case nil: newSystem = []any{claudeCodeBlock} case string: - if v == "" || v == claudeCodeSystemPrompt { + // Be tolerant of older/newer clients that may differ only by trailing whitespace/newlines. + if strings.TrimSpace(v) == "" || strings.TrimSpace(v) == strings.TrimSpace(claudeCodeSystemPrompt) { newSystem = []any{claudeCodeBlock} } else { newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": v}} @@ -2489,7 +2492,7 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { newSystem = append(newSystem, claudeCodeBlock) for _, item := range v { if m, ok := item.(map[string]any); ok { - if text, ok := m["text"].(string); ok && text == claudeCodeSystemPrompt { + if text, ok := m["text"].(string); ok && strings.TrimSpace(text) == strings.TrimSpace(claudeCodeSystemPrompt) { continue } } From 4d566f68b687cf09e7f523d4b8a3342ccbaa2553 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:34:58 +0800 Subject: [PATCH 046/214] chore: gofmt --- backend/internal/service/gateway_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b1507245..01663ae7 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -42,7 +42,7 @@ const ( // Keep a trailing blank line so that when upstream concatenates system strings, // the injected Claude Code banner doesn't run into the next system instruction. 
claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" - maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 + maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) func (s *GatewayService) debugModelRoutingEnabled() bool { From 723e54013a2196daa19371db1884e0c016b61b6a Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 01:49:51 +0800 Subject: [PATCH 047/214] fix(oauth): mimic Claude Code metadata and beta headers --- backend/internal/pkg/claude/constants.go | 7 ++- .../service/gateway_oauth_metadata_test.go | 62 +++++++++++++++++++ backend/internal/service/gateway_service.go | 17 +++-- 3 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 backend/internal/service/gateway_oauth_metadata_test.go diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 0c6e9b4c..fb95ffe2 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -16,7 +16,12 @@ const ( const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming // MessageBetaHeaderNoTools /v1/messages 在无工具时的 beta header -const MessageBetaHeaderNoTools = BetaOAuth + "," + BetaInterleavedThinking +// +// NOTE: Claude Code OAuth credentials are scoped to Claude Code. When we "mimic" +// Claude Code for non-Claude-Code clients, we must include the claude-code beta +// even if the request doesn't use tools, otherwise upstream may reject the +// request as a non-Claude-Code API request. +const MessageBetaHeaderNoTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking // MessageBetaHeaderWithTools /v1/messages 在有工具时的 beta header const MessageBetaHeaderWithTools = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking diff --git a/backend/internal/service/gateway_oauth_metadata_test.go b/backend/internal/service/gateway_oauth_metadata_test.go new file mode 100644 index 00000000..ed6f1887 --- /dev/null +++ b/backend/internal/service/gateway_oauth_metadata_test.go @@ -0,0 +1,62 @@ +package service + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuildOAuthMetadataUserID_FallbackWithoutAccountUUID(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Model: "claude-sonnet-4-5", + Stream: true, + MetadataUserID: "", + System: nil, + Messages: nil, + } + + account := &Account{ + ID: 123, + Type: AccountTypeOAuth, + Extra: map[string]any{}, // intentionally missing account_uuid / claude_user_id + } + + fp := &Fingerprint{ClientID: "deadbeef"} // should be used as user id in legacy format + + got := svc.buildOAuthMetadataUserID(parsed, account, fp) + require.NotEmpty(t, got) + + // Legacy format: user_{client}_account__session_{uuid} + re := regexp.MustCompile(`^user_[a-zA-Z0-9]+_account__session_[a-f0-9-]{36}$`) + require.True(t, re.MatchString(got), "unexpected user_id format: %s", got) +} + +func TestBuildOAuthMetadataUserID_UsesAccountUUIDWhenPresent(t *testing.T) { + svc := &GatewayService{} + + parsed := &ParsedRequest{ + Model: "claude-sonnet-4-5", + Stream: true, + MetadataUserID: "", + } + + account := &Account{ + ID: 123, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "account_uuid": "acc-uuid", + "claude_user_id": "clientid123", + "anthropic_user_id": "", + }, + } + + got := svc.buildOAuthMetadataUserID(parsed, account, nil) + require.NotEmpty(t, got) + + // New format: 
user_{client}_account_{account_uuid}_session_{uuid} + re := regexp.MustCompile(`^user_clientid123_account_acc-uuid_session_[a-f0-9-]{36}$`) + require.True(t, re.MatchString(got), "unexpected user_id format: %s", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 01663ae7..1ebd1246 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -795,17 +795,15 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account if parsed.MetadataUserID != "" { return "" } - accountUUID := account.GetExtraString("account_uuid") - if accountUUID == "" { - return "" - } userID := strings.TrimSpace(account.GetClaudeUserID()) if userID == "" && fp != nil { userID = fp.ClientID } if userID == "" { - return "" + // Fall back to a random, well-formed client id so we can still satisfy + // Claude Code OAuth requirements when account metadata is incomplete. + userID = generateClientID() } sessionHash := s.GenerateSessionHash(parsed) @@ -814,7 +812,14 @@ func (s *GatewayService) buildOAuthMetadataUserID(parsed *ParsedRequest, account seed := fmt.Sprintf("%d::%s", account.ID, sessionHash) sessionID = generateSessionUUID(seed) } - return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) + + // Prefer the newer format that includes account_uuid (if present), + // otherwise fall back to the legacy Claude Code format. + accountUUID := strings.TrimSpace(account.GetExtraString("account_uuid")) + if accountUUID != "" { + return fmt.Sprintf("user_%s_account_%s_session_%s", userID, accountUUID, sessionID) + } + return fmt.Sprintf("user_%s_account__session_%s", userID, sessionID) } func generateSessionUUID(seed string) string { From be3b788b8fd0b9a6715c6c9cfeddfaed4fa9ff65 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:03:54 +0800 Subject: [PATCH 048/214] fix: also prefix next system block with Claude Code banner --- .../internal/service/gateway_prompt_test.go | 9 +++++--- backend/internal/service/gateway_service.go | 22 ++++++++++++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/gateway_prompt_test.go b/backend/internal/service/gateway_prompt_test.go index b056f8fa..52c75d1d 100644 --- a/backend/internal/service/gateway_prompt_test.go +++ b/backend/internal/service/gateway_prompt_test.go @@ -2,6 +2,7 @@ package service import ( "encoding/json" + "strings" "testing" "github.com/stretchr/testify/require" @@ -134,6 +135,8 @@ func TestSystemIncludesClaudeCodePrompt(t *testing.T) { } func TestInjectClaudeCodePrompt(t *testing.T) { + claudePrefix := strings.TrimSpace(claudeCodeSystemPrompt) + tests := []struct { name string body string @@ -162,7 +165,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { system: "Custom prompt", wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - wantSecondText: "Custom prompt", + wantSecondText: claudePrefix + "\n\nCustom prompt", }, { name: "string system equals Claude Code prompt", @@ -178,7 +181,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { // Claude Code + Custom = 2 wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - wantSecondText: "Custom", + wantSecondText: claudePrefix + "\n\nCustom", }, { name: "array system with existing Claude Code prompt (should dedupe)", @@ -190,7 +193,7 @@ func TestInjectClaudeCodePrompt(t *testing.T) { // Claude Code at start + Other = 2 (deduped) wantSystemLen: 2, wantFirstText: claudeCodeSystemPrompt, - 
wantSecondText: "Other", + wantSecondText: claudePrefix + "\n\nOther", }, { name: "empty array", diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 1ebd1246..c23b4f36 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -2479,6 +2479,10 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { "text": claudeCodeSystemPrompt, "cache_control": map[string]string{"type": "ephemeral"}, } + // Opencode plugin applies an extra safeguard: it not only prepends the Claude Code + // banner, it also prefixes the next system instruction with the same banner plus + // a blank line. This helps when upstream concatenates system instructions. + claudeCodePrefix := strings.TrimSpace(claudeCodeSystemPrompt) var newSystem []any @@ -2490,16 +2494,32 @@ func injectClaudeCodePrompt(body []byte, system any) []byte { if strings.TrimSpace(v) == "" || strings.TrimSpace(v) == strings.TrimSpace(claudeCodeSystemPrompt) { newSystem = []any{claudeCodeBlock} } else { - newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": v}} + // Mirror opencode behavior: keep the banner as a separate system entry, + // but also prefix the next system text with the banner. + merged := v + if !strings.HasPrefix(v, claudeCodePrefix) { + merged = claudeCodePrefix + "\n\n" + v + } + newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": merged}} } case []any: newSystem = make([]any, 0, len(v)+1) newSystem = append(newSystem, claudeCodeBlock) + prefixedNext := false for _, item := range v { if m, ok := item.(map[string]any); ok { if text, ok := m["text"].(string); ok && strings.TrimSpace(text) == strings.TrimSpace(claudeCodeSystemPrompt) { continue } + // Prefix the first subsequent text system block once. 
+ if !prefixedNext { + if blockType, _ := m["type"].(string); blockType == "text" { + if text, ok := m["text"].(string); ok && strings.TrimSpace(text) != "" && !strings.HasPrefix(text, claudeCodePrefix) { + m["text"] = claudeCodePrefix + "\n\n" + text + prefixedNext = true + } + } + } } newSystem = append(newSystem, item) } From 4d40fb6b602a0469bbdaa56bd047493a9d712f32 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:36:28 +0800 Subject: [PATCH 049/214] fix(oauth): merge anthropic-beta and force Claude Code headers in mimic mode --- backend/internal/service/gateway_beta_test.go | 23 +++++++ backend/internal/service/gateway_service.go | 66 +++++++++++++++++-- 2 files changed, 84 insertions(+), 5 deletions(-) create mode 100644 backend/internal/service/gateway_beta_test.go diff --git a/backend/internal/service/gateway_beta_test.go b/backend/internal/service/gateway_beta_test.go new file mode 100644 index 00000000..dd58c183 --- /dev/null +++ b/backend/internal/service/gateway_beta_test.go @@ -0,0 +1,23 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMergeAnthropicBeta(t *testing.T) { + got := mergeAnthropicBeta( + []string{"oauth-2025-04-20", "interleaved-thinking-2025-05-14"}, + "foo, oauth-2025-04-20,bar, foo", + ) + require.Equal(t, "oauth-2025-04-20,interleaved-thinking-2025-05-14,foo,bar", got) +} + +func TestMergeAnthropicBeta_EmptyIncoming(t *testing.T) { + got := mergeAnthropicBeta( + []string{"oauth-2025-04-20", "interleaved-thinking-2025-05-14"}, + "", + ) + require.Equal(t, "oauth-2025-04-20,interleaved-thinking-2025-05-14", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index c23b4f36..c666c96a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3230,12 +3230,18 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理 anthropic-beta header(OAuth 账号需要包含 oauth beta) if tokenType == "oauth" { if mimicClaudeCode { - // 非 Claude Code 客户端:按 Claude Code 规则生成 beta header + // 非 Claude Code 客户端:按 opencode 的策略处理: + // - 强制 Claude Code 指纹相关请求头(尤其是 user-agent/x-stainless/x-app) + // - 保留 incoming beta 的同时,确保 OAuth 所需 beta 存在 + applyClaudeCodeMimicHeaders(req, reqStream) + + incomingBeta := req.Header.Get("anthropic-beta") + requiredBetas := []string{claude.BetaOAuth, claude.BetaInterleavedThinking} + // Tools 场景更严格,保留 claude-code beta 以提高 Claude Code 识别成功率。 if requestHasTools(body) { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderWithTools) - } else { - req.Header.Set("anthropic-beta", claude.MessageBetaHeaderNoTools) + requiredBetas = append([]string{claude.BetaClaudeCode}, requiredBetas...) 
} + req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) } else { // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta clientBetaHeader := req.Header.Get("anthropic-beta") @@ -3353,6 +3359,52 @@ func applyClaudeOAuthHeaderDefaults(req *http.Request, isStream bool) { } } +func mergeAnthropicBeta(required []string, incoming string) string { + seen := make(map[string]struct{}, len(required)+8) + out := make([]string, 0, len(required)+8) + + add := func(v string) { + v = strings.TrimSpace(v) + if v == "" { + return + } + if _, ok := seen[v]; ok { + return + } + seen[v] = struct{}{} + out = append(out, v) + } + + for _, r := range required { + add(r) + } + for _, p := range strings.Split(incoming, ",") { + add(p) + } + return strings.Join(out, ",") +} + +// applyClaudeCodeMimicHeaders forces "Claude Code-like" request headers. +// This mirrors opencode-anthropic-auth behavior: do not trust downstream +// headers when using Claude Code-scoped OAuth credentials. +func applyClaudeCodeMimicHeaders(req *http.Request, isStream bool) { + if req == nil { + return + } + // Start with the standard defaults (fill missing). + applyClaudeOAuthHeaderDefaults(req, isStream) + // Then force key headers to match Claude Code fingerprint regardless of what the client sent. + for key, value := range claude.DefaultHeaders { + if value == "" { + continue + } + req.Header.Set(key, value) + } + if isStream { + req.Header.Set("x-stainless-helper-method", "stream") + } +} + func truncateForLog(b []byte, maxBytes int) string { if maxBytes <= 0 { maxBytes = 2048 @@ -4600,7 +4652,11 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { if mimicClaudeCode { - req.Header.Set("anthropic-beta", claude.CountTokensBetaHeader) + applyClaudeCodeMimicHeaders(req, false) + + incomingBeta := req.Header.Get("anthropic-beta") + requiredBetas := []string{claude.BetaClaudeCode, claude.BetaOAuth, claude.BetaInterleavedThinking, claude.BetaTokenCounting} + req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) } else { clientBetaHeader := req.Header.Get("anthropic-beta") if clientBetaHeader == "" { From c37fe91672796d2d1f44f2d1d21a91edc3232a10 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 02:52:26 +0800 Subject: [PATCH 050/214] fix(oauth): update Claude CLI fingerprint headers --- backend/internal/pkg/claude/constants.go | 8 +++++--- backend/internal/service/identity_service.go | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index fb95ffe2..8b3441dc 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -40,13 +40,15 @@ const APIKeyHaikuBetaHeader = BetaInterleavedThinking // DefaultHeaders 是 Claude Code 客户端默认请求头。 var DefaultHeaders = map[string]string{ - "User-Agent": "claude-cli/2.1.2 (external, cli)", + // Keep these in sync with recent Claude CLI traffic to reduce the chance + // that Claude Code-scoped OAuth credentials are rejected as "non-CLI" usage. 
+ "User-Agent": "claude-cli/2.1.22 (external, cli)", "X-Stainless-Lang": "js", "X-Stainless-Package-Version": "0.70.0", "X-Stainless-OS": "Linux", - "X-Stainless-Arch": "x64", + "X-Stainless-Arch": "arm64", "X-Stainless-Runtime": "node", - "X-Stainless-Runtime-Version": "v24.3.0", + "X-Stainless-Runtime-Version": "v24.13.0", "X-Stainless-Retry-Count": "0", "X-Stainless-Timeout": "600", "X-App": "cli", diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go index 4e227fea..a620ac4d 100644 --- a/backend/internal/service/identity_service.go +++ b/backend/internal/service/identity_service.go @@ -26,13 +26,13 @@ var ( // 默认指纹值(当客户端未提供时使用) var defaultFingerprint = Fingerprint{ - UserAgent: "claude-cli/2.1.2 (external, cli)", + UserAgent: "claude-cli/2.1.22 (external, cli)", StainlessLang: "js", StainlessPackageVersion: "0.70.0", StainlessOS: "Linux", - StainlessArch: "x64", + StainlessArch: "arm64", StainlessRuntime: "node", - StainlessRuntimeVersion: "v24.3.0", + StainlessRuntimeVersion: "v24.13.0", } // Fingerprint represents account fingerprint data From d98648f03ba9a0b4308d9b7aeed1e45416ddaf71 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 03:03:40 +0800 Subject: [PATCH 051/214] fix: rewrite OpenCode identity sentence to Claude Code --- .../internal/service/gateway_sanitize_test.go | 20 +++++++++++++++++++ backend/internal/service/gateway_service.go | 8 ++++++++ 2 files changed, 28 insertions(+) create mode 100644 backend/internal/service/gateway_sanitize_test.go diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go new file mode 100644 index 00000000..3b0a07c9 --- /dev/null +++ b/backend/internal/service/gateway_sanitize_test.go @@ -0,0 +1,20 @@ +package service + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) { + in := "You are OpenCode, the best coding agent on the planet." + got := sanitizeOpenCodeText(in) + require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got) +} + +func TestSanitizeOpenCodeText_RewritesOpenCodeKeywords(t *testing.T) { + in := "OpenCode and opencode are mentioned." + got := sanitizeOpenCodeText(in) + require.Equal(t, "Claude Code and Claude are mentioned.", got) +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index c666c96a..e17d0f0c 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -559,6 +559,14 @@ func sanitizeOpenCodeText(text string) string { if text == "" { return text } + // Some clients include a fixed OpenCode identity sentence. Anthropic may treat + // this as a non-Claude-Code fingerprint, so rewrite it to the canonical + // Claude Code banner before generic "OpenCode"/"opencode" replacements. 
+ text = strings.ReplaceAll( + text, + "You are OpenCode, the best coding agent on the planet.", + strings.TrimSpace(claudeCodeSystemPrompt), + ) text = strings.ReplaceAll(text, "OpenCode", "Claude Code") text = opencodeTextRe.ReplaceAllString(text, "Claude") return text From 63412a9fcc4f9569d8338cd5ce3befbfc13604a3 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 03:13:14 +0800 Subject: [PATCH 052/214] chore(debug): log Claude mimic fingerprint --- backend/internal/service/gateway_service.go | 128 ++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index e17d0f0c..44abdb0a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -50,6 +50,11 @@ func (s *GatewayService) debugModelRoutingEnabled() bool { return v == "1" || v == "true" || v == "yes" || v == "on" } +func (s *GatewayService) debugClaudeMimicEnabled() bool { + v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_CLAUDE_MIMIC"))) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + func shortSessionHash(sessionHash string) string { if sessionHash == "" { return "" @@ -60,6 +65,121 @@ func shortSessionHash(sessionHash string) string { return sessionHash[:8] } +func redactAuthHeaderValue(v string) string { + v = strings.TrimSpace(v) + if v == "" { + return "" + } + // Keep scheme for debugging, redact secret. + if strings.HasPrefix(strings.ToLower(v), "bearer ") { + return "Bearer [redacted]" + } + return "[redacted]" +} + +func safeHeaderValueForLog(key string, v string) string { + key = strings.ToLower(strings.TrimSpace(key)) + switch key { + case "authorization", "x-api-key": + return redactAuthHeaderValue(v) + default: + return strings.TrimSpace(v) + } +} + +func extractSystemPreviewFromBody(body []byte) string { + if len(body) == 0 { + return "" + } + sys := gjson.GetBytes(body, "system") + if !sys.Exists() { + return "" + } + + switch { + case sys.IsArray(): + for _, item := range sys.Array() { + if !item.IsObject() { + continue + } + if strings.EqualFold(item.Get("type").String(), "text") { + if t := item.Get("text").String(); strings.TrimSpace(t) != "" { + return t + } + } + } + return "" + case sys.Type == gjson.String: + return sys.String() + default: + return "" + } +} + +func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { + if req == nil { + return + } + + // Only log a minimal fingerprint to avoid leaking user content. + interesting := []string{ + "user-agent", + "x-app", + "anthropic-dangerous-direct-browser-access", + "anthropic-version", + "anthropic-beta", + "x-stainless-lang", + "x-stainless-package-version", + "x-stainless-os", + "x-stainless-arch", + "x-stainless-runtime", + "x-stainless-runtime-version", + "x-stainless-retry-count", + "x-stainless-timeout", + "authorization", + "x-api-key", + "content-type", + "accept", + "x-stainless-helper-method", + } + + h := make([]string, 0, len(interesting)) + for _, k := range interesting { + if v := req.Header.Get(k); v != "" { + h = append(h, fmt.Sprintf("%s=%q", k, safeHeaderValueForLog(k, v))) + } + } + + metaUserID := strings.TrimSpace(gjson.GetBytes(body, "metadata.user_id").String()) + sysPreview := strings.TrimSpace(extractSystemPreviewFromBody(body)) + + // Truncate preview to keep logs sane. + if len(sysPreview) > 300 { + sysPreview = sysPreview[:300] + "..." 
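+		// NOTE: byte-based truncation may split a multi-byte rune at the cut
+		// point; acceptable for a debug-only preview.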
+ } + sysPreview = strings.ReplaceAll(sysPreview, "\n", "\\n") + sysPreview = strings.ReplaceAll(sysPreview, "\r", "\\r") + + aid := int64(0) + aname := "" + if account != nil { + aid = account.ID + aname = account.Name + } + + log.Printf( + "[ClaudeMimicDebug] url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", + req.URL.String(), + aid, + aname, + tokenType, + mimicClaudeCode, + metaUserID, + sysPreview, + strings.Join(h, " "), + ) +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). var ( @@ -3264,6 +3384,10 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } } + if s.debugClaudeMimicEnabled() { + logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) + } + return req, nil } @@ -4686,6 +4810,10 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + if s.debugClaudeMimicEnabled() { + logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) + } + return req, nil } From 91079d3f15a66ecd9460daa122f6b8dc65c3957b Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:17:46 +0800 Subject: [PATCH 053/214] chore(debug): emit Claude mimic fingerprint on credential-scope error --- backend/internal/service/gateway_service.go | 64 +++++++++++++++++++-- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 44abdb0a..b3bbfd94 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -45,6 +45,10 @@ const ( maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) +const ( + claudeMimicDebugInfoKey = "claude_mimic_debug_info" +) + func (s *GatewayService) debugModelRoutingEnabled() bool { v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_MODEL_ROUTING"))) return v == "1" || v == "true" || v == "yes" || v == "on" @@ -116,9 +120,9 @@ func extractSystemPreviewFromBody(body []byte) string { } } -func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { +func buildClaudeMimicDebugLine(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) string { if req == nil { - return + return "" } // Only log a minimal fingerprint to avoid leaking user content. 
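+	// Illustrative shape of a built line (values fabricated, not captured):
+	//   url=... account=42(main) tokenType=oauth mimic=true meta.user_id="user_.._account_.._session_.."
+	//   system.preview="You are Claude Code..." headers={user-agent="claude-cli/..." ...}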
@@ -167,8 +171,8 @@ func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, token aname = account.Name } - log.Printf( - "[ClaudeMimicDebug] url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", + return fmt.Sprintf( + "url=%s account=%d(%s) tokenType=%s mimic=%t meta.user_id=%q system.preview=%q headers={%s}", req.URL.String(), aid, aname, @@ -180,6 +184,23 @@ func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, token ) } +func logClaudeMimicDebug(req *http.Request, body []byte, account *Account, tokenType string, mimicClaudeCode bool) { + line := buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode) + if line == "" { + return + } + log.Printf("[ClaudeMimicDebug] %s", line) +} + +func isClaudeCodeCredentialScopeError(msg string) bool { + m := strings.ToLower(strings.TrimSpace(msg)) + if m == "" { + return false + } + return strings.Contains(m, "only authorized for use with claude code") && + strings.Contains(m, "cannot be used for other api requests") +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). var ( @@ -3384,6 +3405,11 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } } + // Always capture a compact fingerprint line for later error diagnostics. + // We only print it when needed (or when the explicit debug flag is enabled). + if c != nil && tokenType == "oauth" { + c.Set(claudeMimicDebugInfoKey, buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode)) + } if s.debugClaudeMimicEnabled() { logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) } @@ -3640,6 +3666,20 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + // Print a compact upstream request fingerprint when we hit the Claude Code OAuth + // credential scope error. This avoids requiring env-var tweaks in a fixed deploy. + if isClaudeCodeCredentialScopeError(upstreamMsg) && c != nil { + if v, ok := c.Get(claudeMimicDebugInfoKey); ok { + if line, ok := v.(string); ok && strings.TrimSpace(line) != "" { + log.Printf("[ClaudeMimicDebugOnError] status=%d request_id=%s %s", + resp.StatusCode, + resp.Header.Get("x-request-id"), + line, + ) + } + } + } + // Enrich Ops error logs with upstream status + message, and optionally a truncated body snippet. 
upstreamDetail := "" if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { @@ -3769,6 +3809,19 @@ func (s *GatewayService) handleRetryExhaustedError(ctx context.Context, resp *ht upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + if isClaudeCodeCredentialScopeError(upstreamMsg) && c != nil { + if v, ok := c.Get(claudeMimicDebugInfoKey); ok { + if line, ok := v.(string); ok && strings.TrimSpace(line) != "" { + log.Printf("[ClaudeMimicDebugOnError] status=%d request_id=%s %s", + resp.StatusCode, + resp.Header.Get("x-request-id"), + line, + ) + } + } + } + upstreamDetail := "" if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes @@ -4810,6 +4863,9 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + if c != nil && tokenType == "oauth" { + c.Set(claudeMimicDebugInfoKey, buildClaudeMimicDebugLine(req, body, account, tokenType, mimicClaudeCode)) + } if s.debugClaudeMimicEnabled() { logClaudeMimicDebug(req, body, account, tokenType, mimicClaudeCode) } From 8375094c69a1e70d8dfbe02303357081e2606166 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:31:29 +0800 Subject: [PATCH 054/214] fix(oauth): match Claude CLI accept header and beta set --- backend/internal/service/gateway_service.go | 31 +++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index b3bbfd94..8363ba66 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3385,12 +3385,12 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex applyClaudeCodeMimicHeaders(req, reqStream) incomingBeta := req.Header.Get("anthropic-beta") + // Match real Claude CLI traffic (per mitmproxy reports): + // messages requests typically use only oauth + interleaved-thinking. + // Also drop claude-code beta if a downstream client added it. requiredBetas := []string{claude.BetaOAuth, claude.BetaInterleavedThinking} - // Tools 场景更严格,保留 claude-code beta 以提高 Claude Code 识别成功率。 - if requestHasTools(body) { - requiredBetas = append([]string{claude.BetaClaudeCode}, requiredBetas...) - } - req.Header.Set("anthropic-beta", mergeAnthropicBeta(requiredBetas, incomingBeta)) + drop := map[string]struct{}{claude.BetaClaudeCode: {}} + req.Header.Set("anthropic-beta", mergeAnthropicBetaDropping(requiredBetas, incomingBeta, drop)) } else { // Claude Code 客户端:尽量透传原始 header,仅补齐 oauth beta clientBetaHeader := req.Header.Get("anthropic-beta") @@ -3542,6 +3542,25 @@ func mergeAnthropicBeta(required []string, incoming string) string { return strings.Join(out, ",") } +func mergeAnthropicBetaDropping(required []string, incoming string, drop map[string]struct{}) string { + merged := mergeAnthropicBeta(required, incoming) + if merged == "" || len(drop) == 0 { + return merged + } + out := make([]string, 0, 8) + for _, p := range strings.Split(merged, ",") { + p = strings.TrimSpace(p) + if p == "" { + continue + } + if _, ok := drop[p]; ok { + continue + } + out = append(out, p) + } + return strings.Join(out, ",") +} + // applyClaudeCodeMimicHeaders forces "Claude Code-like" request headers. // This mirrors opencode-anthropic-auth behavior: do not trust downstream // headers when using Claude Code-scoped OAuth credentials. 
@@ -3558,6 +3577,8 @@ func applyClaudeCodeMimicHeaders(req *http.Request, isStream bool) { } req.Header.Set(key, value) } + // Real Claude CLI uses Accept: application/json (even for streaming). + req.Header.Set("accept", "application/json") if isStream { req.Header.Set("x-stainless-helper-method", "stream") } From fa454b1b99f2c6866f756773c96f91cad8353173 Mon Sep 17 00:00:00 2001 From: cyhhao Date: Thu, 29 Jan 2026 15:37:07 +0800 Subject: [PATCH 055/214] fix: align Claude Code system banner with opencode latest --- backend/internal/service/gateway_service.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 8363ba66..47ea8593 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -39,9 +39,10 @@ const ( claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" stickySessionTTL = time.Hour // 粘性会话TTL defaultMaxLineSize = 40 * 1024 * 1024 - // Keep a trailing blank line so that when upstream concatenates system strings, - // the injected Claude Code banner doesn't run into the next system instruction. - claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.\n\n" + // Canonical Claude Code banner. Keep it EXACT (no trailing whitespace/newlines) + // to match real Claude CLI traffic as closely as possible. When we need a visual + // separator between system blocks, we add "\n\n" at concatenation time. + claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) From 7ade9baa1559c461dbb103f150377917ad5a2c20 Mon Sep 17 00:00:00 2001 From: song Date: Thu, 29 Jan 2026 21:09:33 +0800 Subject: [PATCH 056/214] =?UTF-8?q?fix(gateway):=20=E8=BF=87=E6=BB=A4=20Ge?= =?UTF-8?q?mini=20=E8=AF=B7=E6=B1=82=E4=B8=AD=20parts=20=E4=B8=BA=E7=A9=BA?= =?UTF-8?q?=E7=9A=84=E6=B6=88=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gemini API 不接受 contents 数组中 parts 为空的消息,会返回 400 INVALID_ARGUMENT 错误。 添加 filterEmptyPartsFromGeminiRequest 函数在转发前过滤这类消息。 影响范围:ForwardGemini (antigravity) 和 ForwardNative (gemini) --- .../service/antigravity_gateway_service.go | 61 ++++++++++++++++++- .../service/gemini_messages_compat_service.go | 5 ++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 67c60db5..6331acd8 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -1412,8 +1412,15 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co proxyURL = account.Proxy.URL() } + // 过滤掉 parts 为空的消息(Gemini API 不接受空 parts) + filteredBody, err := filterEmptyPartsFromGeminiRequest(body) + if err != nil { + log.Printf("[Antigravity] Failed to filter empty parts: %v", err) + filteredBody = body + } + // Antigravity 上游要求必须包含身份提示词,注入到请求中 - injectedBody, err := injectIdentityPatchToGeminiRequest(body) + injectedBody, err := injectIdentityPatchToGeminiRequest(filteredBody) if err != nil { return nil, err } @@ -2778,3 +2785,55 @@ func cleanGeminiRequest(body []byte) ([]byte, error) { return json.Marshal(payload) } + +// filterEmptyPartsFromGeminiRequest 过滤 Gemini 请求中 parts 为空的消息 +// Gemini API 不接受 parts 为空数组的消息,会返回 400 错误 +func 
filterEmptyPartsFromGeminiRequest(body []byte) ([]byte, error) { + var payload map[string]any + if err := json.Unmarshal(body, &payload); err != nil { + return nil, err + } + + contents, ok := payload["contents"].([]any) + if !ok || len(contents) == 0 { + return body, nil + } + + filtered := make([]any, 0, len(contents)) + modified := false + + for _, c := range contents { + contentMap, ok := c.(map[string]any) + if !ok { + filtered = append(filtered, c) + continue + } + + parts, hasParts := contentMap["parts"] + if !hasParts { + filtered = append(filtered, c) + continue + } + + partsSlice, ok := parts.([]any) + if !ok { + filtered = append(filtered, c) + continue + } + + // 跳过 parts 为空数组的消息 + if len(partsSlice) == 0 { + modified = true + continue + } + + filtered = append(filtered, c) + } + + if !modified { + return body, nil + } + + payload["contents"] = filtered + return json.Marshal(payload) +} diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 1879a94c..3833e66e 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -840,6 +840,11 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. return nil, s.writeGoogleError(c, http.StatusBadRequest, "Request body is empty") } + // 过滤掉 parts 为空的消息(Gemini API 不接受空 parts) + if filteredBody, err := filterEmptyPartsFromGeminiRequest(body); err == nil { + body = filteredBody + } + switch action { case "generateContent", "streamGenerateContent", "countTokens": // ok From ba16ace697c6b2b65ca6c4e84818f04dd28aeabd Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Fri, 30 Jan 2026 08:14:52 +0800 Subject: [PATCH 057/214] chore: upgrade Antigravity User-Agent to 1.15.8 --- backend/internal/pkg/antigravity/oauth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index ee2a6c1a..c7d657b9 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -33,7 +33,7 @@ const ( "https://www.googleapis.com/auth/experimentsandconfigs" // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.11.9 windows/amd64" + UserAgent = "antigravity/1.15.8 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute From 6599b366dc17abe62fdd79683b7ee71a06888667 Mon Sep 17 00:00:00 2001 From: shaw Date: Fri, 30 Jan 2026 08:53:53 +0800 Subject: [PATCH 058/214] =?UTF-8?q?fix:=20=E5=8D=87=E7=BA=A7Go=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E8=87=B31.25.6=E4=BF=AE=E5=A4=8D=E6=A0=87=E5=87=86?= =?UTF-8?q?=E5=BA=93=E5=AE=89=E5=85=A8=E6=BC=8F=E6=B4=9E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修复GO-2026-4341和GO-2026-4340两个标准库漏洞 --- .github/workflows/security-scan.yml | 2 +- Dockerfile | 2 +- backend/go.mod | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 160a0df9..dfb8e37e 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -22,7 +22,7 @@ jobs: cache-dependency-path: backend/go.sum - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Run govulncheck working-directory: backend run: | diff --git a/Dockerfile b/Dockerfile index b3320300..3d4b5094 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ 
# ============================================================================= ARG NODE_IMAGE=node:24-alpine -ARG GOLANG_IMAGE=golang:1.25.5-alpine +ARG GOLANG_IMAGE=golang:1.25.6-alpine ARG ALPINE_IMAGE=alpine:3.20 ARG GOPROXY=https://goproxy.cn,direct ARG GOSUMDB=sum.golang.google.cn diff --git a/backend/go.mod b/backend/go.mod index ad7d76b6..4c3e6246 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,6 +1,6 @@ module github.com/Wei-Shaw/sub2api -go 1.25.5 +go 1.25.6 require ( entgo.io/ent v0.14.5 From 4d8f2db92494a29b6b74d220493b02760c48befb Mon Sep 17 00:00:00 2001 From: shaw Date: Fri, 30 Jan 2026 08:57:37 +0800 Subject: [PATCH 059/214] =?UTF-8?q?fix:=20=E6=9B=B4=E6=96=B0=E6=89=80?= =?UTF-8?q?=E6=9C=89CI=20workflow=E7=9A=84Go=E7=89=88=E6=9C=AC=E9=AA=8C?= =?UTF-8?q?=E8=AF=81=E8=87=B31.25.6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/backend-ci.yml | 4 ++-- .github/workflows/release.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 3ea8860a..e5624f86 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -19,7 +19,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Unit tests working-directory: backend run: make test-unit @@ -38,7 +38,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0415000d..f45c1a0b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -115,7 +115,7 @@ jobs: - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' # Docker setup for GoReleaser - name: Set up QEMU From b7f69844e1f8eada74167848dfa8d2456792d639 Mon Sep 17 00:00:00 2001 From: ducky Date: Fri, 30 Jan 2026 16:45:04 +0800 Subject: [PATCH 060/214] feat(announcements): add admin/user announcement system Implements announcements end-to-end (admin CRUD + read status, user list + mark read) with OR-of-AND targeting. Also breaks the ent<->service import cycle by moving schema-facing constants/targeting into a new domain package. 
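For reviewers, the targeting semantics in one place: an announcement matches a
user when any rule group matches, and a group matches only when every condition
inside it holds (OR of ANDs). A minimal sketch of that evaluation; the
condition and group shapes below are illustrative stand-ins, not the exact
types in the new domain package:

    // Sketch only. Assumed shapes; domain.AnnouncementTargeting differs in detail.
    type condition func(userID, groupID int64, hasActiveSub bool) bool

    type ruleGroup []condition // AND: every condition in the group must hold

    type targeting []ruleGroup // OR: any fully satisfied group is enough

    func (t targeting) matches(userID, groupID int64, hasActiveSub bool) bool {
        if len(t) == 0 {
            return true // assumption: no rules means visible to everyone
        }
        for _, g := range t {
            matched := true
            for _, c := range g {
                if !c(userID, groupID, hasActiveSub) {
                    matched = false
                    break
                }
            }
            if matched {
                return true // short-circuit on the first matching group
            }
        }
        return false
    }

This lets an admin express e.g. "everyone in group X with an active
subscription, or user 123" as two rule groups.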
--- backend/cmd/server/wire_gen.go | 9 +- backend/ent/announcement.go | 249 +++ backend/ent/announcement/announcement.go | 164 ++ backend/ent/announcement/where.go | 624 ++++++ backend/ent/announcement_create.go | 1159 +++++++++++ backend/ent/announcement_delete.go | 88 + backend/ent/announcement_query.go | 643 ++++++ backend/ent/announcement_update.go | 824 ++++++++ backend/ent/announcementread.go | 185 ++ .../ent/announcementread/announcementread.go | 127 ++ backend/ent/announcementread/where.go | 257 +++ backend/ent/announcementread_create.go | 660 +++++++ backend/ent/announcementread_delete.go | 88 + backend/ent/announcementread_query.go | 718 +++++++ backend/ent/announcementread_update.go | 456 +++++ backend/ent/client.go | 376 +++- backend/ent/ent.go | 4 + backend/ent/hook/hook.go | 24 + backend/ent/intercept/intercept.go | 60 + backend/ent/migrate/schema.go | 102 + backend/ent/mutation.go | 1759 ++++++++++++++++- backend/ent/predicate/predicate.go | 6 + backend/ent/runtime/runtime.go | 52 + backend/ent/schema/account.go | 4 +- backend/ent/schema/announcement.go | 91 + backend/ent/schema/announcement_read.go | 66 + backend/ent/schema/api_key.go | 4 +- backend/ent/schema/group.go | 8 +- backend/ent/schema/promo_code.go | 4 +- backend/ent/schema/redeem_code.go | 6 +- backend/ent/schema/user.go | 7 +- backend/ent/schema/user_subscription.go | 4 +- backend/ent/tx.go | 6 + backend/ent/user.go | 28 +- backend/ent/user/user.go | 30 + backend/ent/user/where.go | 23 + backend/ent/user_create.go | 32 + backend/ent/user_query.go | 76 +- backend/ent/user_update.go | 163 ++ backend/internal/domain/announcement.go | 226 +++ backend/internal/domain/constants.go | 64 + .../handler/admin/announcement_handler.go | 247 +++ .../internal/handler/announcement_handler.go | 82 + backend/internal/handler/dto/announcement.go | 75 + backend/internal/handler/handler.go | 2 + backend/internal/handler/wire.go | 6 + .../repository/announcement_read_repo.go | 84 + .../internal/repository/announcement_repo.go | 195 ++ backend/internal/repository/wire.go | 2 + backend/internal/server/routes/admin.go | 15 + backend/internal/server/routes/user.go | 7 + backend/internal/service/announcement.go | 64 + .../internal/service/announcement_service.go | 378 ++++ .../service/announcement_targeting_test.go | 67 + backend/internal/service/domain_constants.go | 56 +- backend/internal/service/wire.go | 1 + backend/migrations/045_add_announcements.sql | 44 + frontend/src/api/admin/announcements.ts | 71 + frontend/src/api/admin/index.ts | 3 + frontend/src/api/announcements.ts | 26 + frontend/src/api/index.ts | 1 + .../AnnouncementReadStatusDialog.vue | 186 ++ .../AnnouncementTargetingEditor.vue | 388 ++++ frontend/src/components/layout/AppSidebar.vue | 18 + frontend/src/i18n/locales/en.ts | 83 + frontend/src/i18n/locales/zh.ts | 83 + frontend/src/router/index.ts | 24 + frontend/src/types/index.ts | 75 + .../src/views/admin/AnnouncementsView.vue | 538 +++++ frontend/src/views/user/AnnouncementsView.vue | 140 ++ 70 files changed, 12366 insertions(+), 71 deletions(-) create mode 100644 backend/ent/announcement.go create mode 100644 backend/ent/announcement/announcement.go create mode 100644 backend/ent/announcement/where.go create mode 100644 backend/ent/announcement_create.go create mode 100644 backend/ent/announcement_delete.go create mode 100644 backend/ent/announcement_query.go create mode 100644 backend/ent/announcement_update.go create mode 100644 backend/ent/announcementread.go create mode 100644 
backend/ent/announcementread/announcementread.go create mode 100644 backend/ent/announcementread/where.go create mode 100644 backend/ent/announcementread_create.go create mode 100644 backend/ent/announcementread_delete.go create mode 100644 backend/ent/announcementread_query.go create mode 100644 backend/ent/announcementread_update.go create mode 100644 backend/ent/schema/announcement.go create mode 100644 backend/ent/schema/announcement_read.go create mode 100644 backend/internal/domain/announcement.go create mode 100644 backend/internal/domain/constants.go create mode 100644 backend/internal/handler/admin/announcement_handler.go create mode 100644 backend/internal/handler/announcement_handler.go create mode 100644 backend/internal/handler/dto/announcement.go create mode 100644 backend/internal/repository/announcement_read_repo.go create mode 100644 backend/internal/repository/announcement_repo.go create mode 100644 backend/internal/service/announcement.go create mode 100644 backend/internal/service/announcement_service.go create mode 100644 backend/internal/service/announcement_targeting_test.go create mode 100644 backend/migrations/045_add_announcements.sql create mode 100644 frontend/src/api/admin/announcements.ts create mode 100644 frontend/src/api/announcements.ts create mode 100644 frontend/src/components/admin/announcements/AnnouncementReadStatusDialog.vue create mode 100644 frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue create mode 100644 frontend/src/views/admin/AnnouncementsView.vue create mode 100644 frontend/src/views/user/AnnouncementsView.vue diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 71624091..7d465fee 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -81,6 +81,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator) redeemHandler := handler.NewRedeemHandler(redeemService) subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService) + announcementRepository := repository.NewAnnouncementRepository(client) + announcementReadRepository := repository.NewAnnouncementReadRepository(client) + announcementService := service.NewAnnouncementService(announcementRepository, announcementReadRepository, userRepository, userSubscriptionRepository) + announcementHandler := handler.NewAnnouncementHandler(announcementService) dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db) dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig) dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig) @@ -128,6 +132,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig) accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, compositeTokenCacheInvalidator) + adminAnnouncementHandler := 
admin.NewAnnouncementHandler(announcementService) oAuthHandler := admin.NewOAuthHandler(oAuthService) openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService) @@ -167,12 +172,12 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { userAttributeValueRepository := repository.NewUserAttributeValueRepository(client) userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) totpHandler := handler.NewTotpHandler(totpService) - handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler) jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService) adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) diff --git a/backend/ent/announcement.go b/backend/ent/announcement.go new file mode 100644 index 00000000..93d7a375 --- /dev/null +++ b/backend/ent/announcement.go @@ -0,0 +1,249 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/internal/domain" +) + +// Announcement is the model entity for the Announcement schema. +type Announcement struct { + config `json:"-"` + // ID of the ent. 
+ ID int64 `json:"id,omitempty"` + // 公告标题 + Title string `json:"title,omitempty"` + // 公告内容(支持 Markdown) + Content string `json:"content,omitempty"` + // 状态: draft, active, archived + Status string `json:"status,omitempty"` + // 展示条件(JSON 规则) + Targeting domain.AnnouncementTargeting `json:"targeting,omitempty"` + // 开始展示时间(为空表示立即生效) + StartsAt *time.Time `json:"starts_at,omitempty"` + // 结束展示时间(为空表示永久生效) + EndsAt *time.Time `json:"ends_at,omitempty"` + // 创建人用户ID(管理员) + CreatedBy *int64 `json:"created_by,omitempty"` + // 更新人用户ID(管理员) + UpdatedBy *int64 `json:"updated_by,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AnnouncementQuery when eager-loading is set. + Edges AnnouncementEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AnnouncementEdges holds the relations/edges for other nodes in the graph. +type AnnouncementEdges struct { + // Reads holds the value of the reads edge. + Reads []*AnnouncementRead `json:"reads,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ReadsOrErr returns the Reads value or an error if the edge +// was not loaded in eager-loading. +func (e AnnouncementEdges) ReadsOrErr() ([]*AnnouncementRead, error) { + if e.loadedTypes[0] { + return e.Reads, nil + } + return nil, &NotLoadedError{edge: "reads"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Announcement) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case announcement.FieldTargeting: + values[i] = new([]byte) + case announcement.FieldID, announcement.FieldCreatedBy, announcement.FieldUpdatedBy: + values[i] = new(sql.NullInt64) + case announcement.FieldTitle, announcement.FieldContent, announcement.FieldStatus: + values[i] = new(sql.NullString) + case announcement.FieldStartsAt, announcement.FieldEndsAt, announcement.FieldCreatedAt, announcement.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Announcement fields. 
+func (_m *Announcement) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case announcement.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case announcement.FieldTitle: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field title", values[i]) + } else if value.Valid { + _m.Title = value.String + } + case announcement.FieldContent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field content", values[i]) + } else if value.Valid { + _m.Content = value.String + } + case announcement.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case announcement.FieldTargeting: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field targeting", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Targeting); err != nil { + return fmt.Errorf("unmarshal field targeting: %w", err) + } + } + case announcement.FieldStartsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field starts_at", values[i]) + } else if value.Valid { + _m.StartsAt = new(time.Time) + *_m.StartsAt = value.Time + } + case announcement.FieldEndsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field ends_at", values[i]) + } else if value.Valid { + _m.EndsAt = new(time.Time) + *_m.EndsAt = value.Time + } + case announcement.FieldCreatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + _m.CreatedBy = new(int64) + *_m.CreatedBy = value.Int64 + } + case announcement.FieldUpdatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field updated_by", values[i]) + } else if value.Valid { + _m.UpdatedBy = new(int64) + *_m.UpdatedBy = value.Int64 + } + case announcement.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case announcement.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Announcement. +// This includes values selected through modifiers, order, etc. +func (_m *Announcement) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryReads queries the "reads" edge of the Announcement entity. +func (_m *Announcement) QueryReads() *AnnouncementReadQuery { + return NewAnnouncementClient(_m.config).QueryReads(_m) +} + +// Update returns a builder for updating this Announcement. 
+// Note that you need to call Announcement.Unwrap() before calling this method if this Announcement +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Announcement) Update() *AnnouncementUpdateOne { + return NewAnnouncementClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Announcement entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Announcement) Unwrap() *Announcement { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Announcement is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Announcement) String() string { + var builder strings.Builder + builder.WriteString("Announcement(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("title=") + builder.WriteString(_m.Title) + builder.WriteString(", ") + builder.WriteString("content=") + builder.WriteString(_m.Content) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("targeting=") + builder.WriteString(fmt.Sprintf("%v", _m.Targeting)) + builder.WriteString(", ") + if v := _m.StartsAt; v != nil { + builder.WriteString("starts_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.EndsAt; v != nil { + builder.WriteString("ends_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.CreatedBy; v != nil { + builder.WriteString("created_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.UpdatedBy; v != nil { + builder.WriteString("updated_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Announcements is a parsable slice of Announcement. +type Announcements []*Announcement diff --git a/backend/ent/announcement/announcement.go b/backend/ent/announcement/announcement.go new file mode 100644 index 00000000..4f34ee05 --- /dev/null +++ b/backend/ent/announcement/announcement.go @@ -0,0 +1,164 @@ +// Code generated by ent, DO NOT EDIT. + +package announcement + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the announcement type in the database. + Label = "announcement" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldTitle holds the string denoting the title field in the database. + FieldTitle = "title" + // FieldContent holds the string denoting the content field in the database. + FieldContent = "content" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldTargeting holds the string denoting the targeting field in the database. + FieldTargeting = "targeting" + // FieldStartsAt holds the string denoting the starts_at field in the database. + FieldStartsAt = "starts_at" + // FieldEndsAt holds the string denoting the ends_at field in the database. 
+ FieldEndsAt = "ends_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldUpdatedBy holds the string denoting the updated_by field in the database. + FieldUpdatedBy = "updated_by" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeReads holds the string denoting the reads edge name in mutations. + EdgeReads = "reads" + // Table holds the table name of the announcement in the database. + Table = "announcements" + // ReadsTable is the table that holds the reads relation/edge. + ReadsTable = "announcement_reads" + // ReadsInverseTable is the table name for the AnnouncementRead entity. + // It exists in this package in order to avoid circular dependency with the "announcementread" package. + ReadsInverseTable = "announcement_reads" + // ReadsColumn is the table column denoting the reads relation/edge. + ReadsColumn = "announcement_id" +) + +// Columns holds all SQL columns for announcement fields. +var Columns = []string{ + FieldID, + FieldTitle, + FieldContent, + FieldStatus, + FieldTargeting, + FieldStartsAt, + FieldEndsAt, + FieldCreatedBy, + FieldUpdatedBy, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // TitleValidator is a validator for the "title" field. It is called by the builders before save. + TitleValidator func(string) error + // ContentValidator is a validator for the "content" field. It is called by the builders before save. + ContentValidator func(string) error + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Announcement queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByTitle orders the results by the title field. +func ByTitle(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTitle, opts...).ToFunc() +} + +// ByContent orders the results by the content field. +func ByContent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldContent, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByStartsAt orders the results by the starts_at field. +func ByStartsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartsAt, opts...).ToFunc() +} + +// ByEndsAt orders the results by the ends_at field. 
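+//
+// A minimal ordering sketch; "client" (*ent.Client) and "ctx" (context.Context)
+// are illustrative, and sql.OrderDesc comes from entgo.io/ent/dialect/sql:
+//
+//	anns, err := client.Announcement.Query().
+//		Order(announcement.ByEndsAt(sql.OrderDesc())).
+//		All(ctx)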
+func ByEndsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndsAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByUpdatedBy orders the results by the updated_by field. +func ByUpdatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedBy, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByReadsCount orders the results by reads count. +func ByReadsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newReadsStep(), opts...) + } +} + +// ByReads orders the results by reads terms. +func ByReads(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newReadsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newReadsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ReadsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn), + ) +} diff --git a/backend/ent/announcement/where.go b/backend/ent/announcement/where.go new file mode 100644 index 00000000..d3cad2a5 --- /dev/null +++ b/backend/ent/announcement/where.go @@ -0,0 +1,624 @@ +// Code generated by ent, DO NOT EDIT. + +package announcement + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldID, id)) +} + +// Title applies equality check predicate on the "title" field. It's identical to TitleEQ. 
+func Title(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldTitle, v)) +} + +// Content applies equality check predicate on the "content" field. It's identical to ContentEQ. +func Content(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldContent, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStatus, v)) +} + +// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ. +func StartsAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v)) +} + +// EndsAt applies equality check predicate on the "ends_at" field. It's identical to EndsAtEQ. +func EndsAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v)) +} + +// UpdatedBy applies equality check predicate on the "updated_by" field. It's identical to UpdatedByEQ. +func UpdatedBy(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// TitleEQ applies the EQ predicate on the "title" field. +func TitleEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldTitle, v)) +} + +// TitleNEQ applies the NEQ predicate on the "title" field. +func TitleNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldTitle, v)) +} + +// TitleIn applies the In predicate on the "title" field. +func TitleIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldTitle, vs...)) +} + +// TitleNotIn applies the NotIn predicate on the "title" field. +func TitleNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldTitle, vs...)) +} + +// TitleGT applies the GT predicate on the "title" field. +func TitleGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldTitle, v)) +} + +// TitleGTE applies the GTE predicate on the "title" field. +func TitleGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldTitle, v)) +} + +// TitleLT applies the LT predicate on the "title" field. +func TitleLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldTitle, v)) +} + +// TitleLTE applies the LTE predicate on the "title" field. +func TitleLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldTitle, v)) +} + +// TitleContains applies the Contains predicate on the "title" field. 
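+//
+// For example, a hedged lookup sketch ("client" and "ctx" are illustrative):
+//
+//	exists, err := client.Announcement.Query().
+//		Where(announcement.TitleContains("maintenance")).
+//		Exist(ctx)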
+func TitleContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldTitle, v)) +} + +// TitleHasPrefix applies the HasPrefix predicate on the "title" field. +func TitleHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldTitle, v)) +} + +// TitleHasSuffix applies the HasSuffix predicate on the "title" field. +func TitleHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldTitle, v)) +} + +// TitleEqualFold applies the EqualFold predicate on the "title" field. +func TitleEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldTitle, v)) +} + +// TitleContainsFold applies the ContainsFold predicate on the "title" field. +func TitleContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldTitle, v)) +} + +// ContentEQ applies the EQ predicate on the "content" field. +func ContentEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldContent, v)) +} + +// ContentNEQ applies the NEQ predicate on the "content" field. +func ContentNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldContent, v)) +} + +// ContentIn applies the In predicate on the "content" field. +func ContentIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldContent, vs...)) +} + +// ContentNotIn applies the NotIn predicate on the "content" field. +func ContentNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldContent, vs...)) +} + +// ContentGT applies the GT predicate on the "content" field. +func ContentGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldContent, v)) +} + +// ContentGTE applies the GTE predicate on the "content" field. +func ContentGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldContent, v)) +} + +// ContentLT applies the LT predicate on the "content" field. +func ContentLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldContent, v)) +} + +// ContentLTE applies the LTE predicate on the "content" field. +func ContentLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldContent, v)) +} + +// ContentContains applies the Contains predicate on the "content" field. +func ContentContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldContent, v)) +} + +// ContentHasPrefix applies the HasPrefix predicate on the "content" field. +func ContentHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldContent, v)) +} + +// ContentHasSuffix applies the HasSuffix predicate on the "content" field. +func ContentHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldContent, v)) +} + +// ContentEqualFold applies the EqualFold predicate on the "content" field. +func ContentEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldContent, v)) +} + +// ContentContainsFold applies the ContainsFold predicate on the "content" field. +func ContentContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldContent, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. 
+func StatusEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldStatus, v)) +} + +// TargetingIsNil applies the IsNil predicate on the "targeting" field. +func TargetingIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldTargeting)) +} + +// TargetingNotNil applies the NotNil predicate on the "targeting" field. +func TargetingNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldTargeting)) +} + +// StartsAtEQ applies the EQ predicate on the "starts_at" field. +func StartsAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v)) +} + +// StartsAtNEQ applies the NEQ predicate on the "starts_at" field. +func StartsAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldStartsAt, v)) +} + +// StartsAtIn applies the In predicate on the "starts_at" field. +func StartsAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldStartsAt, vs...)) +} + +// StartsAtNotIn applies the NotIn predicate on the "starts_at" field. 
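+//
+// The time-window predicates compose with Or; a sketch of a "currently
+// visible" check consistent with the schema comments (empty starts_at means
+// effective immediately, empty ends_at means no expiry):
+//
+//	now := time.Now()
+//	visible, err := client.Announcement.Query().
+//		Where(
+//			announcement.StatusEQ("active"),
+//			announcement.Or(announcement.StartsAtIsNil(), announcement.StartsAtLTE(now)),
+//			announcement.Or(announcement.EndsAtIsNil(), announcement.EndsAtGTE(now)),
+//		).
+//		All(ctx)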
+func StartsAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldStartsAt, vs...)) +} + +// StartsAtGT applies the GT predicate on the "starts_at" field. +func StartsAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldStartsAt, v)) +} + +// StartsAtGTE applies the GTE predicate on the "starts_at" field. +func StartsAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldStartsAt, v)) +} + +// StartsAtLT applies the LT predicate on the "starts_at" field. +func StartsAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldStartsAt, v)) +} + +// StartsAtLTE applies the LTE predicate on the "starts_at" field. +func StartsAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldStartsAt, v)) +} + +// StartsAtIsNil applies the IsNil predicate on the "starts_at" field. +func StartsAtIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldStartsAt)) +} + +// StartsAtNotNil applies the NotNil predicate on the "starts_at" field. +func StartsAtNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldStartsAt)) +} + +// EndsAtEQ applies the EQ predicate on the "ends_at" field. +func EndsAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v)) +} + +// EndsAtNEQ applies the NEQ predicate on the "ends_at" field. +func EndsAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldEndsAt, v)) +} + +// EndsAtIn applies the In predicate on the "ends_at" field. +func EndsAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldEndsAt, vs...)) +} + +// EndsAtNotIn applies the NotIn predicate on the "ends_at" field. +func EndsAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldEndsAt, vs...)) +} + +// EndsAtGT applies the GT predicate on the "ends_at" field. +func EndsAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldEndsAt, v)) +} + +// EndsAtGTE applies the GTE predicate on the "ends_at" field. +func EndsAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldEndsAt, v)) +} + +// EndsAtLT applies the LT predicate on the "ends_at" field. +func EndsAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldEndsAt, v)) +} + +// EndsAtLTE applies the LTE predicate on the "ends_at" field. +func EndsAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldEndsAt, v)) +} + +// EndsAtIsNil applies the IsNil predicate on the "ends_at" field. +func EndsAtIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldEndsAt)) +} + +// EndsAtNotNil applies the NotNil predicate on the "ends_at" field. +func EndsAtNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldEndsAt)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. 
+func CreatedByNEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldCreatedBy, v)) +} + +// CreatedByIsNil applies the IsNil predicate on the "created_by" field. +func CreatedByIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldCreatedBy)) +} + +// CreatedByNotNil applies the NotNil predicate on the "created_by" field. +func CreatedByNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldCreatedBy)) +} + +// UpdatedByEQ applies the EQ predicate on the "updated_by" field. +func UpdatedByEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// UpdatedByNEQ applies the NEQ predicate on the "updated_by" field. +func UpdatedByNEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldUpdatedBy, v)) +} + +// UpdatedByIn applies the In predicate on the "updated_by" field. +func UpdatedByIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByNotIn applies the NotIn predicate on the "updated_by" field. +func UpdatedByNotIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByGT applies the GT predicate on the "updated_by" field. +func UpdatedByGT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldUpdatedBy, v)) +} + +// UpdatedByGTE applies the GTE predicate on the "updated_by" field. +func UpdatedByGTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldUpdatedBy, v)) +} + +// UpdatedByLT applies the LT predicate on the "updated_by" field. +func UpdatedByLT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldUpdatedBy, v)) +} + +// UpdatedByLTE applies the LTE predicate on the "updated_by" field. +func UpdatedByLTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldUpdatedBy, v)) +} + +// UpdatedByIsNil applies the IsNil predicate on the "updated_by" field. +func UpdatedByIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldUpdatedBy)) +} + +// UpdatedByNotNil applies the NotNil predicate on the "updated_by" field. 
+func UpdatedByNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldUpdatedBy)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasReads applies the HasEdge predicate on the "reads" edge. 
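+//
+// HasReads matches announcements with at least one read; HasReadsWith narrows
+// the match with predicates on the related AnnouncementRead rows. For example
+// (announcementread.UserID is a hypothetical generated predicate):
+//
+//	read, err := client.Announcement.Query().
+//		Where(announcement.HasReadsWith(announcementread.UserID(uid))).
+//		All(ctx)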
+func HasReads() predicate.Announcement {
+	return predicate.Announcement(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasReadsWith applies the HasEdge predicate on the "reads" edge with given conditions (other predicates).
+func HasReadsWith(preds ...predicate.AnnouncementRead) predicate.Announcement {
+	return predicate.Announcement(func(s *sql.Selector) {
+		step := newReadsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Announcement) predicate.Announcement {
+	return predicate.Announcement(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Announcement) predicate.Announcement {
+	return predicate.Announcement(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Announcement) predicate.Announcement {
+	return predicate.Announcement(sql.NotPredicates(p))
+}
diff --git a/backend/ent/announcement_create.go b/backend/ent/announcement_create.go
new file mode 100644
index 00000000..151d4c11
--- /dev/null
+++ b/backend/ent/announcement_create.go
@@ -0,0 +1,1159 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/announcement"
+	"github.com/Wei-Shaw/sub2api/ent/announcementread"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
+)
+
+// AnnouncementCreate is the builder for creating an Announcement entity.
+type AnnouncementCreate struct {
+	config
+	mutation *AnnouncementMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetTitle sets the "title" field.
+func (_c *AnnouncementCreate) SetTitle(v string) *AnnouncementCreate {
+	_c.mutation.SetTitle(v)
+	return _c
+}
+
+// SetContent sets the "content" field.
+func (_c *AnnouncementCreate) SetContent(v string) *AnnouncementCreate {
+	_c.mutation.SetContent(v)
+	return _c
+}
+
+// SetStatus sets the "status" field.
+func (_c *AnnouncementCreate) SetStatus(v string) *AnnouncementCreate {
+	_c.mutation.SetStatus(v)
+	return _c
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_c *AnnouncementCreate) SetNillableStatus(v *string) *AnnouncementCreate {
+	if v != nil {
+		_c.SetStatus(*v)
+	}
+	return _c
+}
+
+// SetTargeting sets the "targeting" field.
+func (_c *AnnouncementCreate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementCreate {
+	_c.mutation.SetTargeting(v)
+	return _c
+}
+
+// SetNillableTargeting sets the "targeting" field if the given value is not nil.
+func (_c *AnnouncementCreate) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementCreate {
+	if v != nil {
+		_c.SetTargeting(*v)
+	}
+	return _c
+}
+
+// SetStartsAt sets the "starts_at" field.
+func (_c *AnnouncementCreate) SetStartsAt(v time.Time) *AnnouncementCreate {
+	_c.mutation.SetStartsAt(v)
+	return _c
+}
+
+// SetNillableStartsAt sets the "starts_at" field if the given value is not nil.
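+//
+// A minimal create sketch; "client", "ctx" and the field values are
+// illustrative, and the status falls back to the schema default when omitted:
+//
+//	startsAt := time.Now().Add(time.Hour)
+//	ann, err := client.Announcement.Create().
+//		SetTitle("Scheduled maintenance").
+//		SetContent("**Heads up:** the gateway restarts at 02:00 UTC.").
+//		SetNillableStartsAt(&startsAt).
+//		Save(ctx)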
+func (_c *AnnouncementCreate) SetNillableStartsAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetStartsAt(*v) + } + return _c +} + +// SetEndsAt sets the "ends_at" field. +func (_c *AnnouncementCreate) SetEndsAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetEndsAt(v) + return _c +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableEndsAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetEndsAt(*v) + } + return _c +} + +// SetCreatedBy sets the "created_by" field. +func (_c *AnnouncementCreate) SetCreatedBy(v int64) *AnnouncementCreate { + _c.mutation.SetCreatedBy(v) + return _c +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableCreatedBy(v *int64) *AnnouncementCreate { + if v != nil { + _c.SetCreatedBy(*v) + } + return _c +} + +// SetUpdatedBy sets the "updated_by" field. +func (_c *AnnouncementCreate) SetUpdatedBy(v int64) *AnnouncementCreate { + _c.mutation.SetUpdatedBy(v) + return _c +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableUpdatedBy(v *int64) *AnnouncementCreate { + if v != nil { + _c.SetUpdatedBy(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AnnouncementCreate) SetCreatedAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableCreatedAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *AnnouncementCreate) SetUpdatedAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableUpdatedAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_c *AnnouncementCreate) AddReadIDs(ids ...int64) *AnnouncementCreate { + _c.mutation.AddReadIDs(ids...) + return _c +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_c *AnnouncementCreate) AddReads(v ...*AnnouncementRead) *AnnouncementCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_c *AnnouncementCreate) Mutation() *AnnouncementMutation { + return _c.mutation +} + +// Save creates the Announcement in the database. +func (_c *AnnouncementCreate) Save(ctx context.Context) (*Announcement, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AnnouncementCreate) SaveX(ctx context.Context) *Announcement { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AnnouncementCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AnnouncementCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_c *AnnouncementCreate) defaults() { + if _, ok := _c.mutation.Status(); !ok { + v := announcement.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := announcement.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := announcement.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *AnnouncementCreate) check() error { + if _, ok := _c.mutation.Title(); !ok { + return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "Announcement.title"`)} + } + if v, ok := _c.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if _, ok := _c.mutation.Content(); !ok { + return &ValidationError{Name: "content", err: errors.New(`ent: missing required field "Announcement.content"`)} + } + if v, ok := _c.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Announcement.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Announcement.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Announcement.updated_at"`)} + } + return nil +} + +func (_c *AnnouncementCreate) sqlSave(ctx context.Context) (*Announcement, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *AnnouncementCreate) createSpec() (*Announcement, *sqlgraph.CreateSpec) { + var ( + _node = &Announcement{config: _c.config} + _spec = sqlgraph.NewCreateSpec(announcement.Table, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + _node.Title = value + } + if value, ok := _c.mutation.Content(); ok { + _spec.SetField(announcement.FieldContent, field.TypeString, value) + _node.Content = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(announcement.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Targeting(); ok { + _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value) + _node.Targeting = value + } + if value, ok := _c.mutation.StartsAt(); ok { + _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value) + _node.StartsAt = &value + } + if value, ok := 
_c.mutation.EndsAt(); ok {
+		_spec.SetField(announcement.FieldEndsAt, field.TypeTime, value)
+		_node.EndsAt = &value
+	}
+	if value, ok := _c.mutation.CreatedBy(); ok {
+		_spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value)
+		_node.CreatedBy = &value
+	}
+	if value, ok := _c.mutation.UpdatedBy(); ok {
+		_spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value)
+		_node.UpdatedBy = &value
+	}
+	if value, ok := _c.mutation.CreatedAt(); ok {
+		_spec.SetField(announcement.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := _c.mutation.UpdatedAt(); ok {
+		_spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if nodes := _c.mutation.ReadsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   announcement.ReadsTable,
+			Columns: []string{announcement.ReadsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.Announcement.Create().
+//		SetTitle(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AnnouncementUpsert) {
+//			u.SetTitle(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *AnnouncementCreate) OnConflict(opts ...sql.ConflictOption) *AnnouncementUpsertOne {
+	_c.conflict = opts
+	return &AnnouncementUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.Announcement.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *AnnouncementCreate) OnConflictColumns(columns ...string) *AnnouncementUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &AnnouncementUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// AnnouncementUpsertOne is the builder for "upsert"-ing
+	// one Announcement node.
+	AnnouncementUpsertOne struct {
+		create *AnnouncementCreate
+	}
+
+	// AnnouncementUpsert is the "OnConflict" setter.
+	AnnouncementUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetTitle sets the "title" field.
+func (u *AnnouncementUpsert) SetTitle(v string) *AnnouncementUpsert {
+	u.Set(announcement.FieldTitle, v)
+	return u
+}
+
+// UpdateTitle sets the "title" field to the value that was provided on create.
+func (u *AnnouncementUpsert) UpdateTitle() *AnnouncementUpsert {
+	u.SetExcluded(announcement.FieldTitle)
+	return u
+}
+
+// SetContent sets the "content" field.
+func (u *AnnouncementUpsert) SetContent(v string) *AnnouncementUpsert {
+	u.Set(announcement.FieldContent, v)
+	return u
+}
+
+// UpdateContent sets the "content" field to the value that was provided on create.
+func (u *AnnouncementUpsert) UpdateContent() *AnnouncementUpsert {
+	u.SetExcluded(announcement.FieldContent)
+	return u
+}
+
+// SetStatus sets the "status" field.
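+//
+// Setters on AnnouncementUpsert are meant to run inside Update. A hedged
+// sketch, assuming a column with a unique constraint to conflict on
+// (announcement.FieldID below is purely illustrative):
+//
+//	err := client.Announcement.Create().
+//		SetTitle(title).
+//		SetContent(content).
+//		OnConflictColumns(announcement.FieldID).
+//		Update(func(u *ent.AnnouncementUpsert) {
+//			u.SetStatus("active")
+//		}).
+//		Exec(ctx)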
+func (u *AnnouncementUpsert) SetStatus(v string) *AnnouncementUpsert { + u.Set(announcement.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateStatus() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldStatus) + return u +} + +// SetTargeting sets the "targeting" field. +func (u *AnnouncementUpsert) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsert { + u.Set(announcement.FieldTargeting, v) + return u +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateTargeting() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldTargeting) + return u +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsert) ClearTargeting() *AnnouncementUpsert { + u.SetNull(announcement.FieldTargeting) + return u +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsert) SetStartsAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldStartsAt, v) + return u +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateStartsAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldStartsAt) + return u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsert) ClearStartsAt() *AnnouncementUpsert { + u.SetNull(announcement.FieldStartsAt) + return u +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsert) SetEndsAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldEndsAt, v) + return u +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateEndsAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldEndsAt) + return u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsert) ClearEndsAt() *AnnouncementUpsert { + u.SetNull(announcement.FieldEndsAt) + return u +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsert) SetCreatedBy(v int64) *AnnouncementUpsert { + u.Set(announcement.FieldCreatedBy, v) + return u +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateCreatedBy() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldCreatedBy) + return u +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *AnnouncementUpsert) AddCreatedBy(v int64) *AnnouncementUpsert { + u.Add(announcement.FieldCreatedBy, v) + return u +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsert) ClearCreatedBy() *AnnouncementUpsert { + u.SetNull(announcement.FieldCreatedBy) + return u +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsert) SetUpdatedBy(v int64) *AnnouncementUpsert { + u.Set(announcement.FieldUpdatedBy, v) + return u +} + +// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateUpdatedBy() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldUpdatedBy) + return u +} + +// AddUpdatedBy adds v to the "updated_by" field. +func (u *AnnouncementUpsert) AddUpdatedBy(v int64) *AnnouncementUpsert { + u.Add(announcement.FieldUpdatedBy, v) + return u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. 
+func (u *AnnouncementUpsert) ClearUpdatedBy() *AnnouncementUpsert { + u.SetNull(announcement.FieldUpdatedBy) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AnnouncementUpsert) SetUpdatedAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateUpdatedAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AnnouncementUpsertOne) UpdateNewValues() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(announcement.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementUpsertOne) Ignore() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementUpsertOne) DoNothing() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementCreate.OnConflict +// documentation for more info. +func (u *AnnouncementUpsertOne) Update(set func(*AnnouncementUpsert)) *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementUpsert{UpdateSet: update}) + })) + return u +} + +// SetTitle sets the "title" field. +func (u *AnnouncementUpsertOne) SetTitle(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTitle(v) + }) +} + +// UpdateTitle sets the "title" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateTitle() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTitle() + }) +} + +// SetContent sets the "content" field. +func (u *AnnouncementUpsertOne) SetContent(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateContent() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateContent() + }) +} + +// SetStatus sets the "status" field. +func (u *AnnouncementUpsertOne) SetStatus(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateStatus() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStatus() + }) +} + +// SetTargeting sets the "targeting" field. 
+func (u *AnnouncementUpsertOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTargeting(v) + }) +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateTargeting() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTargeting() + }) +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsertOne) ClearTargeting() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearTargeting() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsertOne) SetStartsAt(v time.Time) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateStartsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStartsAt() + }) +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsertOne) ClearStartsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearStartsAt() + }) +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsertOne) SetEndsAt(v time.Time) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetEndsAt(v) + }) +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateEndsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateEndsAt() + }) +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsertOne) ClearEndsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearEndsAt() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsertOne) SetCreatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *AnnouncementUpsertOne) AddCreatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateCreatedBy() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateCreatedBy() + }) +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsertOne) ClearCreatedBy() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearCreatedBy() + }) +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsertOne) SetUpdatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedBy(v) + }) +} + +// AddUpdatedBy adds v to the "updated_by" field. +func (u *AnnouncementUpsertOne) AddUpdatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.AddUpdatedBy(v) + }) +} + +// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateUpdatedBy() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateUpdatedBy() + }) +} + +// ClearUpdatedBy clears the value of the "updated_by" field. 
+func (u *AnnouncementUpsertOne) ClearUpdatedBy() *AnnouncementUpsertOne {
+	return u.Update(func(s *AnnouncementUpsert) {
+		s.ClearUpdatedBy()
+	})
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *AnnouncementUpsertOne) SetUpdatedAt(v time.Time) *AnnouncementUpsertOne {
+	return u.Update(func(s *AnnouncementUpsert) {
+		s.SetUpdatedAt(v)
+	})
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *AnnouncementUpsertOne) UpdateUpdatedAt() *AnnouncementUpsertOne {
+	return u.Update(func(s *AnnouncementUpsert) {
+		s.UpdateUpdatedAt()
+	})
+}
+
+// Exec executes the query.
+func (u *AnnouncementUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for AnnouncementCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AnnouncementUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *AnnouncementUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *AnnouncementUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// AnnouncementCreateBulk is the builder for creating many Announcement entities in bulk.
+type AnnouncementCreateBulk struct {
+	config
+	err      error
+	builders []*AnnouncementCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the Announcement entities in the database.
+func (_c *AnnouncementCreateBulk) Save(ctx context.Context) ([]*Announcement, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*Announcement, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*AnnouncementMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
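+//
+// A bulk creation sketch ("client" and "ctx" are illustrative):
+//
+//	builders := []*ent.AnnouncementCreate{
+//		client.Announcement.Create().SetTitle("A").SetContent("aa"),
+//		client.Announcement.Create().SetTitle("B").SetContent("bb"),
+//	}
+//	anns, err := client.Announcement.CreateBulk(builders...).Save(ctx)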
+func (_c *AnnouncementCreateBulk) SaveX(ctx context.Context) []*Announcement { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AnnouncementCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AnnouncementCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Announcement.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AnnouncementUpsert) { +// SetTitle(v+v). +// }). +// Exec(ctx) +func (_c *AnnouncementCreateBulk) OnConflict(opts ...sql.ConflictOption) *AnnouncementUpsertBulk { + _c.conflict = opts + return &AnnouncementUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AnnouncementCreateBulk) OnConflictColumns(columns ...string) *AnnouncementUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AnnouncementUpsertBulk{ + create: _c, + } +} + +// AnnouncementUpsertBulk is the builder for "upsert"-ing +// a bulk of Announcement nodes. +type AnnouncementUpsertBulk struct { + create *AnnouncementCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AnnouncementUpsertBulk) UpdateNewValues() *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(announcement.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementUpsertBulk) Ignore() *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementUpsertBulk) DoNothing() *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementCreateBulk.OnConflict +// documentation for more info. +func (u *AnnouncementUpsertBulk) Update(set func(*AnnouncementUpsert)) *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementUpsert{UpdateSet: update}) + })) + return u +} + +// SetTitle sets the "title" field. 
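
Stepping outside the generated file for a moment: the conflict options above compose into an idempotent bulk write. A minimal usage sketch, assuming the usual generated client wiring; the title column as conflict target and the status value are illustrative, not part of this patch.

// Sketch only. Assumed imports: context, plus the module's ent and ent/announcement packages.
func upsertAnnouncements(ctx context.Context, client *ent.Client, titles []string) error {
	builders := make([]*ent.AnnouncementCreate, 0, len(titles))
	for _, t := range titles {
		builders = append(builders, client.Announcement.Create().
			SetTitle(t).
			SetContent("pending").
			SetStatus("draft"))
	}
	return client.Announcement.CreateBulk(builders...).
		OnConflictColumns(announcement.FieldTitle). // hypothetical unique column
		UpdateNewValues().                          // keep the values proposed for insertion
		Exec(ctx)
}
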
+func (u *AnnouncementUpsertBulk) SetTitle(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTitle(v) + }) +} + +// UpdateTitle sets the "title" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateTitle() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTitle() + }) +} + +// SetContent sets the "content" field. +func (u *AnnouncementUpsertBulk) SetContent(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateContent() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateContent() + }) +} + +// SetStatus sets the "status" field. +func (u *AnnouncementUpsertBulk) SetStatus(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateStatus() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStatus() + }) +} + +// SetTargeting sets the "targeting" field. +func (u *AnnouncementUpsertBulk) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTargeting(v) + }) +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateTargeting() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTargeting() + }) +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsertBulk) ClearTargeting() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearTargeting() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsertBulk) SetStartsAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateStartsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStartsAt() + }) +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsertBulk) ClearStartsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearStartsAt() + }) +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsertBulk) SetEndsAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetEndsAt(v) + }) +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateEndsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateEndsAt() + }) +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsertBulk) ClearEndsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearEndsAt() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsertBulk) SetCreatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. 
+func (u *AnnouncementUpsertBulk) AddCreatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateCreatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateCreatedBy() + }) +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsertBulk) ClearCreatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearCreatedBy() + }) +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsertBulk) SetUpdatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedBy(v) + }) +} + +// AddUpdatedBy adds v to the "updated_by" field. +func (u *AnnouncementUpsertBulk) AddUpdatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.AddUpdatedBy(v) + }) +} + +// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateUpdatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateUpdatedBy() + }) +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (u *AnnouncementUpsertBulk) ClearUpdatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearUpdatedBy() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AnnouncementUpsertBulk) SetUpdatedAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateUpdatedAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *AnnouncementUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AnnouncementCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AnnouncementCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AnnouncementUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcement_delete.go b/backend/ent/announcement_delete.go new file mode 100644 index 00000000..d185e9f7 --- /dev/null +++ b/backend/ent/announcement_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AnnouncementDelete is the builder for deleting a Announcement entity. +type AnnouncementDelete struct { + config + hooks []Hook + mutation *AnnouncementMutation +} + +// Where appends a list predicates to the AnnouncementDelete builder. +func (_d *AnnouncementDelete) Where(ps ...predicate.Announcement) *AnnouncementDelete { + _d.mutation.Where(ps...) 
+ return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AnnouncementDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AnnouncementDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(announcement.Table, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AnnouncementDeleteOne is the builder for deleting a single Announcement entity. +type AnnouncementDeleteOne struct { + _d *AnnouncementDelete +} + +// Where appends a list predicates to the AnnouncementDelete builder. +func (_d *AnnouncementDeleteOne) Where(ps ...predicate.Announcement) *AnnouncementDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AnnouncementDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{announcement.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcement_query.go b/backend/ent/announcement_query.go new file mode 100644 index 00000000..a27d50fa --- /dev/null +++ b/backend/ent/announcement_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AnnouncementQuery is the builder for querying Announcement entities. +type AnnouncementQuery struct { + config + ctx *QueryContext + order []announcement.OrderOption + inters []Interceptor + predicates []predicate.Announcement + withReads *AnnouncementReadQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AnnouncementQuery builder. +func (_q *AnnouncementQuery) Where(ps ...predicate.Announcement) *AnnouncementQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AnnouncementQuery) Limit(limit int) *AnnouncementQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AnnouncementQuery) Offset(offset int) *AnnouncementQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
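
Before the query builder continues, a quick orientation on the delete side above: it is driven the same way as the other builders. A sketch, assuming the generated predicate helpers in the announcement package (StatusEQ and the status value are illustrative):

// Sketch only: returns how many rows were removed.
func pruneDrafts(ctx context.Context, client *ent.Client) (int, error) {
	return client.Announcement.Delete().
		Where(announcement.StatusEQ("draft")).
		Exec(ctx)
}
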
+func (_q *AnnouncementQuery) Unique(unique bool) *AnnouncementQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AnnouncementQuery) Order(o ...announcement.OrderOption) *AnnouncementQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryReads chains the current query on the "reads" edge. +func (_q *AnnouncementQuery) QueryReads() *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcement.Table, announcement.FieldID, selector), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, announcement.ReadsTable, announcement.ReadsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Announcement entity from the query. +// Returns a *NotFoundError when no Announcement was found. +func (_q *AnnouncementQuery) First(ctx context.Context) (*Announcement, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{announcement.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AnnouncementQuery) FirstX(ctx context.Context) *Announcement { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Announcement ID from the query. +// Returns a *NotFoundError when no Announcement ID was found. +func (_q *AnnouncementQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{announcement.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AnnouncementQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Announcement entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Announcement entity is found. +// Returns a *NotFoundError when no Announcement entities are found. +func (_q *AnnouncementQuery) Only(ctx context.Context) (*Announcement, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{announcement.Label} + default: + return nil, &NotSingularError{announcement.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AnnouncementQuery) OnlyX(ctx context.Context) *Announcement { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Announcement ID in the query. +// Returns a *NotSingularError when more than one Announcement ID is found. +// Returns a *NotFoundError when no entities are found. 
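
First and Only above differ only in how strictly cardinality is enforced. A lookup sketch; IsNotFound is the generated helper matching the *NotFoundError used throughout this file, and IDEQ is the assumed generated ID predicate:

// Sketch only: fetch by ID, treating absence as a non-error for this caller.
func findAnnouncement(ctx context.Context, client *ent.Client, id int64) (*ent.Announcement, error) {
	a, err := client.Announcement.Query().
		Where(announcement.IDEQ(id)).
		Only(ctx) // fails on zero rows and on more than one row
	if ent.IsNotFound(err) {
		return nil, nil
	}
	return a, err
}
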
+func (_q *AnnouncementQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{announcement.Label} + default: + err = &NotSingularError{announcement.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AnnouncementQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Announcements. +func (_q *AnnouncementQuery) All(ctx context.Context) ([]*Announcement, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Announcement, *AnnouncementQuery]() + return withInterceptors[[]*Announcement](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AnnouncementQuery) AllX(ctx context.Context) []*Announcement { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Announcement IDs. +func (_q *AnnouncementQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(announcement.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AnnouncementQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AnnouncementQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AnnouncementQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AnnouncementQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AnnouncementQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AnnouncementQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AnnouncementQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AnnouncementQuery) Clone() *AnnouncementQuery { + if _q == nil { + return nil + } + return &AnnouncementQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]announcement.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Announcement{}, _q.predicates...), + withReads: _q.withReads.Clone(), + // clone intermediate query. 
+ sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithReads tells the query-builder to eager-load the nodes that are connected to +// the "reads" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AnnouncementQuery) WithReads(opts ...func(*AnnouncementReadQuery)) *AnnouncementQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withReads = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Title string `json:"title,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Announcement.Query(). +// GroupBy(announcement.FieldTitle). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AnnouncementQuery) GroupBy(field string, fields ...string) *AnnouncementGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AnnouncementGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = announcement.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Title string `json:"title,omitempty"` +// } +// +// client.Announcement.Query(). +// Select(announcement.FieldTitle). +// Scan(ctx, &v) +func (_q *AnnouncementQuery) Select(fields ...string) *AnnouncementSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AnnouncementSelect{AnnouncementQuery: _q} + sbuild.label = announcement.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AnnouncementSelect configured with the given aggregations. +func (_q *AnnouncementQuery) Aggregate(fns ...AggregateFunc) *AnnouncementSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *AnnouncementQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !announcement.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AnnouncementQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Announcement, error) { + var ( + nodes = []*Announcement{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withReads != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Announcement).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Announcement{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withReads; query != nil { + if err := _q.loadReads(ctx, query, nodes, + func(n *Announcement) { n.Edges.Reads = []*AnnouncementRead{} }, + func(n *Announcement, e *AnnouncementRead) { n.Edges.Reads = append(n.Edges.Reads, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AnnouncementQuery) loadReads(ctx context.Context, query *AnnouncementReadQuery, nodes []*Announcement, init func(*Announcement), assign func(*Announcement, *AnnouncementRead)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Announcement) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(announcementread.FieldAnnouncementID) + } + query.Where(predicate.AnnouncementRead(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(announcement.ReadsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AnnouncementID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "announcement_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *AnnouncementQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AnnouncementQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, announcement.FieldID) + for i := range fields { + if fields[i] != announcement.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AnnouncementQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(announcement.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = announcement.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *AnnouncementQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *AnnouncementQuery) ForShare(opts ...sql.LockOption) *AnnouncementQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// AnnouncementGroupBy is the group-by builder for Announcement entities. +type AnnouncementGroupBy struct { + selector + build *AnnouncementQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AnnouncementGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
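
ForUpdate and ForShare above are only meaningful inside a transaction. A sketch of the intended shape, assuming the client exposes the usual generated Tx support; the status value is illustrative:

// Sketch only: read a row under SELECT ... FOR UPDATE, then mutate it atomically.
func publish(ctx context.Context, client *ent.Client, id int64) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	a, err := tx.Announcement.Query().
		Where(announcement.IDEQ(id)).
		ForUpdate(). // lock held until commit or rollback
		Only(ctx)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	if _, err := tx.Announcement.UpdateOne(a).SetStatus("published").Save(ctx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}
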
+func (_g *AnnouncementGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementQuery, *AnnouncementGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AnnouncementGroupBy) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AnnouncementSelect is the builder for selecting fields of Announcement entities. +type AnnouncementSelect struct { + *AnnouncementQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AnnouncementSelect) Aggregate(fns ...AggregateFunc) *AnnouncementSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AnnouncementSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementQuery, *AnnouncementSelect](ctx, _s.AnnouncementQuery, _s, _s.inters, v) +} + +func (_s *AnnouncementSelect) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/announcement_update.go b/backend/ent/announcement_update.go new file mode 100644 index 00000000..702d0817 --- /dev/null +++ b/backend/ent/announcement_update.go @@ -0,0 +1,824 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/internal/domain" +) + +// AnnouncementUpdate is the builder for updating Announcement entities. +type AnnouncementUpdate struct { + config + hooks []Hook + mutation *AnnouncementMutation +} + +// Where appends a list predicates to the AnnouncementUpdate builder. +func (_u *AnnouncementUpdate) Where(ps ...predicate.Announcement) *AnnouncementUpdate { + _u.mutation.Where(ps...) 
+ return _u +} + +// SetTitle sets the "title" field. +func (_u *AnnouncementUpdate) SetTitle(v string) *AnnouncementUpdate { + _u.mutation.SetTitle(v) + return _u +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableTitle(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetTitle(*v) + } + return _u +} + +// SetContent sets the "content" field. +func (_u *AnnouncementUpdate) SetContent(v string) *AnnouncementUpdate { + _u.mutation.SetContent(v) + return _u +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableContent(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetContent(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *AnnouncementUpdate) SetStatus(v string) *AnnouncementUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableStatus(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTargeting sets the "targeting" field. +func (_u *AnnouncementUpdate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdate { + _u.mutation.SetTargeting(v) + return _u +} + +// SetNillableTargeting sets the "targeting" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdate { + if v != nil { + _u.SetTargeting(*v) + } + return _u +} + +// ClearTargeting clears the value of the "targeting" field. +func (_u *AnnouncementUpdate) ClearTargeting() *AnnouncementUpdate { + _u.mutation.ClearTargeting() + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *AnnouncementUpdate) SetStartsAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableStartsAt(v *time.Time) *AnnouncementUpdate { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (_u *AnnouncementUpdate) ClearStartsAt() *AnnouncementUpdate { + _u.mutation.ClearStartsAt() + return _u +} + +// SetEndsAt sets the "ends_at" field. +func (_u *AnnouncementUpdate) SetEndsAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetEndsAt(v) + return _u +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableEndsAt(v *time.Time) *AnnouncementUpdate { + if v != nil { + _u.SetEndsAt(*v) + } + return _u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (_u *AnnouncementUpdate) ClearEndsAt() *AnnouncementUpdate { + _u.mutation.ClearEndsAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *AnnouncementUpdate) SetCreatedBy(v int64) *AnnouncementUpdate { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableCreatedBy(v *int64) *AnnouncementUpdate { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *AnnouncementUpdate) AddCreatedBy(v int64) *AnnouncementUpdate { + _u.mutation.AddCreatedBy(v) + return _u +} + +// ClearCreatedBy clears the value of the "created_by" field. 
+func (_u *AnnouncementUpdate) ClearCreatedBy() *AnnouncementUpdate { + _u.mutation.ClearCreatedBy() + return _u +} + +// SetUpdatedBy sets the "updated_by" field. +func (_u *AnnouncementUpdate) SetUpdatedBy(v int64) *AnnouncementUpdate { + _u.mutation.ResetUpdatedBy() + _u.mutation.SetUpdatedBy(v) + return _u +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableUpdatedBy(v *int64) *AnnouncementUpdate { + if v != nil { + _u.SetUpdatedBy(*v) + } + return _u +} + +// AddUpdatedBy adds value to the "updated_by" field. +func (_u *AnnouncementUpdate) AddUpdatedBy(v int64) *AnnouncementUpdate { + _u.mutation.AddUpdatedBy(v) + return _u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (_u *AnnouncementUpdate) ClearUpdatedBy() *AnnouncementUpdate { + _u.mutation.ClearUpdatedBy() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AnnouncementUpdate) SetUpdatedAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_u *AnnouncementUpdate) AddReadIDs(ids ...int64) *AnnouncementUpdate { + _u.mutation.AddReadIDs(ids...) + return _u +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdate) AddReads(v ...*AnnouncementRead) *AnnouncementUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_u *AnnouncementUpdate) Mutation() *AnnouncementMutation { + return _u.mutation +} + +// ClearReads clears all "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdate) ClearReads() *AnnouncementUpdate { + _u.mutation.ClearReads() + return _u +} + +// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs. +func (_u *AnnouncementUpdate) RemoveReadIDs(ids ...int64) *AnnouncementUpdate { + _u.mutation.RemoveReadIDs(ids...) + return _u +} + +// RemoveReads removes "reads" edges to AnnouncementRead entities. +func (_u *AnnouncementUpdate) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveReadIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AnnouncementUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AnnouncementUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AnnouncementUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := announcement.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
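
Tying the update builder together: defaults() above stamps updated_at on every save, so callers never set it by hand. A multi-row sketch; EndsAtLT is the assumed generated time predicate and the archived status is illustrative:

// Sketch only: archive every announcement whose display window has closed.
func archiveEnded(ctx context.Context, client *ent.Client, adminID int64) (int, error) {
	return client.Announcement.Update().
		Where(announcement.EndsAtLT(time.Now())).
		SetStatus("archived").
		SetUpdatedBy(adminID).
		Save(ctx) // updated_at is filled in by defaults() before hooks run
}
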
+func (_u *AnnouncementUpdate) check() error { + if v, ok := _u.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if v, ok := _u.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + return nil +} + +func (_u *AnnouncementUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + } + if value, ok := _u.mutation.Content(); ok { + _spec.SetField(announcement.FieldContent, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(announcement.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Targeting(); ok { + _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value) + } + if _u.mutation.TargetingCleared() { + _spec.ClearField(announcement.FieldTargeting, field.TypeJSON) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value) + } + if _u.mutation.StartsAtCleared() { + _spec.ClearField(announcement.FieldStartsAt, field.TypeTime) + } + if value, ok := _u.mutation.EndsAt(); ok { + _spec.SetField(announcement.FieldEndsAt, field.TypeTime, value) + } + if _u.mutation.EndsAtCleared() { + _spec.ClearField(announcement.FieldEndsAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.CreatedByCleared() { + _spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedBy(); ok { + _spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedUpdatedBy(); ok { + _spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if _u.mutation.UpdatedByCleared() { + _spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcement.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AnnouncementUpdateOne is the builder for updating a single Announcement entity. +type AnnouncementUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AnnouncementMutation +} + +// SetTitle sets the "title" field. +func (_u *AnnouncementUpdateOne) SetTitle(v string) *AnnouncementUpdateOne { + _u.mutation.SetTitle(v) + return _u +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableTitle(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetTitle(*v) + } + return _u +} + +// SetContent sets the "content" field. +func (_u *AnnouncementUpdateOne) SetContent(v string) *AnnouncementUpdateOne { + _u.mutation.SetContent(v) + return _u +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableContent(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetContent(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *AnnouncementUpdateOne) SetStatus(v string) *AnnouncementUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableStatus(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTargeting sets the "targeting" field. +func (_u *AnnouncementUpdateOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdateOne { + _u.mutation.SetTargeting(v) + return _u +} + +// SetNillableTargeting sets the "targeting" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdateOne { + if v != nil { + _u.SetTargeting(*v) + } + return _u +} + +// ClearTargeting clears the value of the "targeting" field. +func (_u *AnnouncementUpdateOne) ClearTargeting() *AnnouncementUpdateOne { + _u.mutation.ClearTargeting() + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *AnnouncementUpdateOne) SetStartsAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. 
+func (_u *AnnouncementUpdateOne) SetNillableStartsAt(v *time.Time) *AnnouncementUpdateOne { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (_u *AnnouncementUpdateOne) ClearStartsAt() *AnnouncementUpdateOne { + _u.mutation.ClearStartsAt() + return _u +} + +// SetEndsAt sets the "ends_at" field. +func (_u *AnnouncementUpdateOne) SetEndsAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetEndsAt(v) + return _u +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableEndsAt(v *time.Time) *AnnouncementUpdateOne { + if v != nil { + _u.SetEndsAt(*v) + } + return _u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (_u *AnnouncementUpdateOne) ClearEndsAt() *AnnouncementUpdateOne { + _u.mutation.ClearEndsAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *AnnouncementUpdateOne) SetCreatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableCreatedBy(v *int64) *AnnouncementUpdateOne { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *AnnouncementUpdateOne) AddCreatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.AddCreatedBy(v) + return _u +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (_u *AnnouncementUpdateOne) ClearCreatedBy() *AnnouncementUpdateOne { + _u.mutation.ClearCreatedBy() + return _u +} + +// SetUpdatedBy sets the "updated_by" field. +func (_u *AnnouncementUpdateOne) SetUpdatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.ResetUpdatedBy() + _u.mutation.SetUpdatedBy(v) + return _u +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableUpdatedBy(v *int64) *AnnouncementUpdateOne { + if v != nil { + _u.SetUpdatedBy(*v) + } + return _u +} + +// AddUpdatedBy adds value to the "updated_by" field. +func (_u *AnnouncementUpdateOne) AddUpdatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.AddUpdatedBy(v) + return _u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (_u *AnnouncementUpdateOne) ClearUpdatedBy() *AnnouncementUpdateOne { + _u.mutation.ClearUpdatedBy() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AnnouncementUpdateOne) SetUpdatedAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_u *AnnouncementUpdateOne) AddReadIDs(ids ...int64) *AnnouncementUpdateOne { + _u.mutation.AddReadIDs(ids...) + return _u +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdateOne) AddReads(v ...*AnnouncementRead) *AnnouncementUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_u *AnnouncementUpdateOne) Mutation() *AnnouncementMutation { + return _u.mutation +} + +// ClearReads clears all "reads" edges to the AnnouncementRead entity. 
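
The reads edge mutators above pair with the AnnouncementRead entity introduced later in this patch. A sketch of linking receipts by ID, assuming the read rows already exist:

// Sketch only: attach existing read receipts to one announcement.
func attachReads(ctx context.Context, client *ent.Client, annID int64, readIDs ...int64) error {
	_, err := client.Announcement.UpdateOneID(annID).
		AddReadIDs(readIDs...).
		Save(ctx)
	return err
}
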
+func (_u *AnnouncementUpdateOne) ClearReads() *AnnouncementUpdateOne { + _u.mutation.ClearReads() + return _u +} + +// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs. +func (_u *AnnouncementUpdateOne) RemoveReadIDs(ids ...int64) *AnnouncementUpdateOne { + _u.mutation.RemoveReadIDs(ids...) + return _u +} + +// RemoveReads removes "reads" edges to AnnouncementRead entities. +func (_u *AnnouncementUpdateOne) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveReadIDs(ids...) +} + +// Where appends a list predicates to the AnnouncementUpdate builder. +func (_u *AnnouncementUpdateOne) Where(ps ...predicate.Announcement) *AnnouncementUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AnnouncementUpdateOne) Select(field string, fields ...string) *AnnouncementUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Announcement entity. +func (_u *AnnouncementUpdateOne) Save(ctx context.Context) (*Announcement, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementUpdateOne) SaveX(ctx context.Context) *Announcement { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AnnouncementUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AnnouncementUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := announcement.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AnnouncementUpdateOne) check() error { + if v, ok := _u.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if v, ok := _u.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + return nil +} + +func (_u *AnnouncementUpdateOne) sqlSave(ctx context.Context) (_node *Announcement, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Announcement.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, announcement.FieldID) + for _, f := range fields { + if !announcement.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != announcement.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + } + if value, ok := _u.mutation.Content(); ok { + _spec.SetField(announcement.FieldContent, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(announcement.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Targeting(); ok { + _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value) + } + if _u.mutation.TargetingCleared() { + _spec.ClearField(announcement.FieldTargeting, field.TypeJSON) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value) + } + if _u.mutation.StartsAtCleared() { + _spec.ClearField(announcement.FieldStartsAt, field.TypeTime) + } + if value, ok := _u.mutation.EndsAt(); ok { + _spec.SetField(announcement.FieldEndsAt, field.TypeTime, value) + } + if _u.mutation.EndsAtCleared() { + _spec.ClearField(announcement.FieldEndsAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.CreatedByCleared() { + _spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedBy(); ok { + _spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedUpdatedBy(); ok { + _spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if _u.mutation.UpdatedByCleared() { + _spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64) + } + if 
value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Announcement{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcement.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/announcementread.go b/backend/ent/announcementread.go new file mode 100644 index 00000000..7bba04f2 --- /dev/null +++ b/backend/ent/announcementread.go @@ -0,0 +1,185 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementRead is the model entity for the AnnouncementRead schema. +type AnnouncementRead struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // AnnouncementID holds the value of the "announcement_id" field. + AnnouncementID int64 `json:"announcement_id,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // 用户首次已读时间 + ReadAt time.Time `json:"read_at,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AnnouncementReadQuery when eager-loading is set. + Edges AnnouncementReadEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AnnouncementReadEdges holds the relations/edges for other nodes in the graph. +type AnnouncementReadEdges struct { + // Announcement holds the value of the announcement edge. + Announcement *Announcement `json:"announcement,omitempty"` + // User holds the value of the user edge. 
+ User *User `json:"user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// AnnouncementOrErr returns the Announcement value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AnnouncementReadEdges) AnnouncementOrErr() (*Announcement, error) { + if e.Announcement != nil { + return e.Announcement, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: announcement.Label} + } + return nil, &NotLoadedError{edge: "announcement"} +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AnnouncementReadEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AnnouncementRead) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case announcementread.FieldID, announcementread.FieldAnnouncementID, announcementread.FieldUserID: + values[i] = new(sql.NullInt64) + case announcementread.FieldReadAt, announcementread.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AnnouncementRead fields. +func (_m *AnnouncementRead) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case announcementread.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case announcementread.FieldAnnouncementID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field announcement_id", values[i]) + } else if value.Valid { + _m.AnnouncementID = value.Int64 + } + case announcementread.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case announcementread.FieldReadAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field read_at", values[i]) + } else if value.Valid { + _m.ReadAt = value.Time + } + case announcementread.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AnnouncementRead. +// This includes values selected through modifiers, order, etc. +func (_m *AnnouncementRead) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAnnouncement queries the "announcement" edge of the AnnouncementRead entity. 
+func (_m *AnnouncementRead) QueryAnnouncement() *AnnouncementQuery { + return NewAnnouncementReadClient(_m.config).QueryAnnouncement(_m) +} + +// QueryUser queries the "user" edge of the AnnouncementRead entity. +func (_m *AnnouncementRead) QueryUser() *UserQuery { + return NewAnnouncementReadClient(_m.config).QueryUser(_m) +} + +// Update returns a builder for updating this AnnouncementRead. +// Note that you need to call AnnouncementRead.Unwrap() before calling this method if this AnnouncementRead +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *AnnouncementRead) Update() *AnnouncementReadUpdateOne { + return NewAnnouncementReadClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the AnnouncementRead entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *AnnouncementRead) Unwrap() *AnnouncementRead { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: AnnouncementRead is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *AnnouncementRead) String() string { + var builder strings.Builder + builder.WriteString("AnnouncementRead(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("announcement_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AnnouncementID)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("read_at=") + builder.WriteString(_m.ReadAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// AnnouncementReads is a parsable slice of AnnouncementRead. +type AnnouncementReads []*AnnouncementRead diff --git a/backend/ent/announcementread/announcementread.go b/backend/ent/announcementread/announcementread.go new file mode 100644 index 00000000..cf5fe458 --- /dev/null +++ b/backend/ent/announcementread/announcementread.go @@ -0,0 +1,127 @@ +// Code generated by ent, DO NOT EDIT. + +package announcementread + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the announcementread type in the database. + Label = "announcement_read" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldAnnouncementID holds the string denoting the announcement_id field in the database. + FieldAnnouncementID = "announcement_id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldReadAt holds the string denoting the read_at field in the database. + FieldReadAt = "read_at" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeAnnouncement holds the string denoting the announcement edge name in mutations. + EdgeAnnouncement = "announcement" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // Table holds the table name of the announcementread in the database. + Table = "announcement_reads" + // AnnouncementTable is the table that holds the announcement relation/edge. 
+ AnnouncementTable = "announcement_reads" + // AnnouncementInverseTable is the table name for the Announcement entity. + // It exists in this package in order to avoid circular dependency with the "announcement" package. + AnnouncementInverseTable = "announcements" + // AnnouncementColumn is the table column denoting the announcement relation/edge. + AnnouncementColumn = "announcement_id" + // UserTable is the table that holds the user relation/edge. + UserTable = "announcement_reads" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" +) + +// Columns holds all SQL columns for announcementread fields. +var Columns = []string{ + FieldID, + FieldAnnouncementID, + FieldUserID, + FieldReadAt, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultReadAt holds the default value on creation for the "read_at" field. + DefaultReadAt func() time.Time + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AnnouncementRead queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByAnnouncementID orders the results by the announcement_id field. +func ByAnnouncementID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAnnouncementID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByReadAt orders the results by the read_at field. +func ByReadAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReadAt, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByAnnouncementField orders the results by announcement field. +func ByAnnouncementField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnnouncementStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUserField orders the results by user field. 
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} +func newAnnouncementStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnnouncementInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn), + ) +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} diff --git a/backend/ent/announcementread/where.go b/backend/ent/announcementread/where.go new file mode 100644 index 00000000..1a4305e8 --- /dev/null +++ b/backend/ent/announcementread/where.go @@ -0,0 +1,257 @@ +// Code generated by ent, DO NOT EDIT. + +package announcementread + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLTE(FieldID, id)) +} + +// AnnouncementID applies equality check predicate on the "announcement_id" field. It's identical to AnnouncementIDEQ. +func AnnouncementID(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v)) +} + +// ReadAt applies equality check predicate on the "read_at" field. It's identical to ReadAtEQ. +func ReadAt(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
+func CreatedAt(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v)) +} + +// AnnouncementIDEQ applies the EQ predicate on the "announcement_id" field. +func AnnouncementIDEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v)) +} + +// AnnouncementIDNEQ applies the NEQ predicate on the "announcement_id" field. +func AnnouncementIDNEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldAnnouncementID, v)) +} + +// AnnouncementIDIn applies the In predicate on the "announcement_id" field. +func AnnouncementIDIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldAnnouncementID, vs...)) +} + +// AnnouncementIDNotIn applies the NotIn predicate on the "announcement_id" field. +func AnnouncementIDNotIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldAnnouncementID, vs...)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldUserID, vs...)) +} + +// ReadAtEQ applies the EQ predicate on the "read_at" field. +func ReadAtEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v)) +} + +// ReadAtNEQ applies the NEQ predicate on the "read_at" field. +func ReadAtNEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldReadAt, v)) +} + +// ReadAtIn applies the In predicate on the "read_at" field. +func ReadAtIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldReadAt, vs...)) +} + +// ReadAtNotIn applies the NotIn predicate on the "read_at" field. +func ReadAtNotIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldReadAt, vs...)) +} + +// ReadAtGT applies the GT predicate on the "read_at" field. +func ReadAtGT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGT(FieldReadAt, v)) +} + +// ReadAtGTE applies the GTE predicate on the "read_at" field. +func ReadAtGTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGTE(FieldReadAt, v)) +} + +// ReadAtLT applies the LT predicate on the "read_at" field. +func ReadAtLT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLT(FieldReadAt, v)) +} + +// ReadAtLTE applies the LTE predicate on the "read_at" field. +func ReadAtLTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLTE(FieldReadAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
+func CreatedAtEQ(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// HasAnnouncement applies the HasEdge predicate on the "announcement" edge.
+func HasAnnouncement() predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasAnnouncementWith applies the HasEdge predicate on the "announcement" edge with the given conditions (other predicates).
+func HasAnnouncementWith(preds ...predicate.Announcement) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(func(s *sql.Selector) {
+		step := newAnnouncementStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasUser applies the HasEdge predicate on the "user" edge.
+func HasUser() predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with the given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(func(s *sql.Selector) {
+		step := newUserStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.AndPredicates(predicates...))
+}
+
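+// Illustrative usage (editorial sketch, not generated code): from the caller's
+// side, the predicates in this package compose with the query builder in
+// backend/ent. The client, since, and ctx variables are assumptions for the
+// example.
+//
+//	reads, err := client.AnnouncementRead.Query().
+//		Where(
+//			announcementread.UserID(42),
+//			announcementread.ReadAtGTE(since),
+//		).
+//		WithAnnouncement().
+//		All(ctx)
+
+// Or groups predicates with the OR operator between them.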
+func Or(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.AnnouncementRead) predicate.AnnouncementRead {
+	return predicate.AnnouncementRead(sql.NotPredicates(p))
+}
diff --git a/backend/ent/announcementread_create.go b/backend/ent/announcementread_create.go
new file mode 100644
index 00000000..c8c211ff
--- /dev/null
+++ b/backend/ent/announcementread_create.go
@@ -0,0 +1,660 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/announcement"
+	"github.com/Wei-Shaw/sub2api/ent/announcementread"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+)
+
+// AnnouncementReadCreate is the builder for creating an AnnouncementRead entity.
+type AnnouncementReadCreate struct {
+	config
+	mutation *AnnouncementReadMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetAnnouncementID sets the "announcement_id" field.
+func (_c *AnnouncementReadCreate) SetAnnouncementID(v int64) *AnnouncementReadCreate {
+	_c.mutation.SetAnnouncementID(v)
+	return _c
+}
+
+// SetUserID sets the "user_id" field.
+func (_c *AnnouncementReadCreate) SetUserID(v int64) *AnnouncementReadCreate {
+	_c.mutation.SetUserID(v)
+	return _c
+}
+
+// SetReadAt sets the "read_at" field.
+func (_c *AnnouncementReadCreate) SetReadAt(v time.Time) *AnnouncementReadCreate {
+	_c.mutation.SetReadAt(v)
+	return _c
+}
+
+// SetNillableReadAt sets the "read_at" field if the given value is not nil.
+func (_c *AnnouncementReadCreate) SetNillableReadAt(v *time.Time) *AnnouncementReadCreate {
+	if v != nil {
+		_c.SetReadAt(*v)
+	}
+	return _c
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *AnnouncementReadCreate) SetCreatedAt(v time.Time) *AnnouncementReadCreate {
+	_c.mutation.SetCreatedAt(v)
+	return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *AnnouncementReadCreate) SetNillableCreatedAt(v *time.Time) *AnnouncementReadCreate {
+	if v != nil {
+		_c.SetCreatedAt(*v)
+	}
+	return _c
+}
+
+// SetAnnouncement sets the "announcement" edge to the Announcement entity.
+func (_c *AnnouncementReadCreate) SetAnnouncement(v *Announcement) *AnnouncementReadCreate {
+	return _c.SetAnnouncementID(v.ID)
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (_c *AnnouncementReadCreate) SetUser(v *User) *AnnouncementReadCreate {
+	return _c.SetUserID(v.ID)
+}
+
+// Mutation returns the AnnouncementReadMutation object of the builder.
+func (_c *AnnouncementReadCreate) Mutation() *AnnouncementReadMutation {
+	return _c.mutation
+}
+
+// Save creates the AnnouncementRead in the database.
+func (_c *AnnouncementReadCreate) Save(ctx context.Context) (*AnnouncementRead, error) {
+	_c.defaults()
+	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (_c *AnnouncementReadCreate) SaveX(ctx context.Context) *AnnouncementRead {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *AnnouncementReadCreate) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
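+// Illustrative usage (editorial sketch, not generated code): creating a single
+// read marker. The client, annID, userID, and ctx variables are assumptions.
+//
+//	read, err := client.AnnouncementRead.Create().
+//		SetAnnouncementID(annID).
+//		SetUserID(userID).
+//		Save(ctx)
+
+// ExecX is like Exec, but panics if an error occurs.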
+func (_c *AnnouncementReadCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *AnnouncementReadCreate) defaults() { + if _, ok := _c.mutation.ReadAt(); !ok { + v := announcementread.DefaultReadAt() + _c.mutation.SetReadAt(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := announcementread.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *AnnouncementReadCreate) check() error { + if _, ok := _c.mutation.AnnouncementID(); !ok { + return &ValidationError{Name: "announcement_id", err: errors.New(`ent: missing required field "AnnouncementRead.announcement_id"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "AnnouncementRead.user_id"`)} + } + if _, ok := _c.mutation.ReadAt(); !ok { + return &ValidationError{Name: "read_at", err: errors.New(`ent: missing required field "AnnouncementRead.read_at"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AnnouncementRead.created_at"`)} + } + if len(_c.mutation.AnnouncementIDs()) == 0 { + return &ValidationError{Name: "announcement", err: errors.New(`ent: missing required edge "AnnouncementRead.announcement"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "AnnouncementRead.user"`)} + } + return nil +} + +func (_c *AnnouncementReadCreate) sqlSave(ctx context.Context) (*AnnouncementRead, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *AnnouncementReadCreate) createSpec() (*AnnouncementRead, *sqlgraph.CreateSpec) { + var ( + _node = &AnnouncementRead{config: _c.config} + _spec = sqlgraph.NewCreateSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.ReadAt(); ok { + _spec.SetField(announcementread.FieldReadAt, field.TypeTime, value) + _node.ReadAt = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(announcementread.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.AnnouncementIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AnnouncementID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.UserID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.AnnouncementRead.Create().
+//		SetAnnouncementID(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AnnouncementReadUpsert) {
+//			SetAnnouncementID(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *AnnouncementReadCreate) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertOne {
+	_c.conflict = opts
+	return &AnnouncementReadUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.AnnouncementRead.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *AnnouncementReadCreate) OnConflictColumns(columns ...string) *AnnouncementReadUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &AnnouncementReadUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// AnnouncementReadUpsertOne is the builder for "upsert"-ing
+	//  one AnnouncementRead node.
+	AnnouncementReadUpsertOne struct {
+		create *AnnouncementReadCreate
+	}
+
+	// AnnouncementReadUpsert is the "OnConflict" setter.
+	AnnouncementReadUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetAnnouncementID sets the "announcement_id" field.
+func (u *AnnouncementReadUpsert) SetAnnouncementID(v int64) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldAnnouncementID, v)
+	return u
+}
+
+// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateAnnouncementID() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldAnnouncementID)
+	return u
+}
+
+// SetUserID sets the "user_id" field.
+func (u *AnnouncementReadUpsert) SetUserID(v int64) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldUserID, v)
+	return u
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateUserID() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldUserID)
+	return u
+}
+
+// SetReadAt sets the "read_at" field.
+func (u *AnnouncementReadUpsert) SetReadAt(v time.Time) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldReadAt, v)
+	return u
+}
+
+// UpdateReadAt sets the "read_at" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateReadAt() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldReadAt)
+	return u
+}
+
+// UpdateNewValues updates the mutable fields using the new values that were set on create.
+// Using this option is equivalent to using:
+//
+//	client.AnnouncementRead.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+//		Exec(ctx)
+func (u *AnnouncementReadUpsertOne) UpdateNewValues() *AnnouncementReadUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		if _, exists := u.create.mutation.CreatedAt(); exists {
+			s.SetIgnore(announcementread.FieldCreatedAt)
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.AnnouncementRead.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *AnnouncementReadUpsertOne) Ignore() *AnnouncementReadUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *AnnouncementReadUpsertOne) DoNothing() *AnnouncementReadUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreate.OnConflict
+// documentation for more info.
+func (u *AnnouncementReadUpsertOne) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
+		set(&AnnouncementReadUpsert{UpdateSet: update})
+	}))
+	return u
+}
+
+// SetAnnouncementID sets the "announcement_id" field.
+func (u *AnnouncementReadUpsertOne) SetAnnouncementID(v int64) *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.SetAnnouncementID(v)
+	})
+}
+
+// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsertOne) UpdateAnnouncementID() *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.UpdateAnnouncementID()
+	})
+}
+
+// SetUserID sets the "user_id" field.
+func (u *AnnouncementReadUpsertOne) SetUserID(v int64) *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.SetUserID(v)
+	})
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsertOne) UpdateUserID() *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.UpdateUserID()
+	})
+}
+
+// SetReadAt sets the "read_at" field.
+func (u *AnnouncementReadUpsertOne) SetReadAt(v time.Time) *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.SetReadAt(v)
+	})
+}
+
+// UpdateReadAt sets the "read_at" field to the value that was provided on create.
+func (u *AnnouncementReadUpsertOne) UpdateReadAt() *AnnouncementReadUpsertOne {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.UpdateReadAt()
+	})
+}
+
+// Exec executes the query.
+func (u *AnnouncementReadUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for AnnouncementReadCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AnnouncementReadUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
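+// Illustrative usage (editorial sketch, not generated code): the typical
+// "mark announcement as read, once" upsert. It assumes a unique index on
+// (announcement_id, user_id); client, annID, userID, and ctx are assumptions.
+//
+//	err := client.AnnouncementRead.Create().
+//		SetAnnouncementID(annID).
+//		SetUserID(userID).
+//		OnConflictColumns(
+//			announcementread.FieldAnnouncementID,
+//			announcementread.FieldUserID,
+//		).
+//		Ignore().
+//		Exec(ctx)
+
+// ID executes the UPSERT query and returns the inserted/updated ID.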
+func (u *AnnouncementReadUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *AnnouncementReadUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// AnnouncementReadCreateBulk is the builder for creating many AnnouncementRead entities in bulk.
+type AnnouncementReadCreateBulk struct {
+	config
+	err      error
+	builders []*AnnouncementReadCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the AnnouncementRead entities in the database.
+func (_c *AnnouncementReadCreateBulk) Save(ctx context.Context) ([]*AnnouncementRead, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*AnnouncementRead, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*AnnouncementReadMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *AnnouncementReadCreateBulk) SaveX(ctx context.Context) []*AnnouncementRead {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *AnnouncementReadCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *AnnouncementReadCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.AnnouncementRead.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AnnouncementReadUpsert) {
+//			SetAnnouncementID(v+v).
+//		}).
+// Exec(ctx) +func (_c *AnnouncementReadCreateBulk) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertBulk { + _c.conflict = opts + return &AnnouncementReadUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AnnouncementReadCreateBulk) OnConflictColumns(columns ...string) *AnnouncementReadUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AnnouncementReadUpsertBulk{ + create: _c, + } +} + +// AnnouncementReadUpsertBulk is the builder for "upsert"-ing +// a bulk of AnnouncementRead nodes. +type AnnouncementReadUpsertBulk struct { + create *AnnouncementReadCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AnnouncementReadUpsertBulk) UpdateNewValues() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(announcementread.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementReadUpsertBulk) Ignore() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementReadUpsertBulk) DoNothing() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreateBulk.OnConflict +// documentation for more info. +func (u *AnnouncementReadUpsertBulk) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementReadUpsert{UpdateSet: update}) + })) + return u +} + +// SetAnnouncementID sets the "announcement_id" field. +func (u *AnnouncementReadUpsertBulk) SetAnnouncementID(v int64) *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetAnnouncementID(v) + }) +} + +// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create. +func (u *AnnouncementReadUpsertBulk) UpdateAnnouncementID() *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateAnnouncementID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *AnnouncementReadUpsertBulk) SetUserID(v int64) *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. 
+func (u *AnnouncementReadUpsertBulk) UpdateUserID() *AnnouncementReadUpsertBulk {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.UpdateUserID()
+	})
+}
+
+// SetReadAt sets the "read_at" field.
+func (u *AnnouncementReadUpsertBulk) SetReadAt(v time.Time) *AnnouncementReadUpsertBulk {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.SetReadAt(v)
+	})
+}
+
+// UpdateReadAt sets the "read_at" field to the value that was provided on create.
+func (u *AnnouncementReadUpsertBulk) UpdateReadAt() *AnnouncementReadUpsertBulk {
+	return u.Update(func(s *AnnouncementReadUpsert) {
+		s.UpdateReadAt()
+	})
+}
+
+// Exec executes the query.
+func (u *AnnouncementReadUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AnnouncementReadCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for AnnouncementReadCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AnnouncementReadUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/announcementread_delete.go b/backend/ent/announcementread_delete.go
new file mode 100644
index 00000000..a4da0821
--- /dev/null
+++ b/backend/ent/announcementread_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/announcementread"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+)
+
+// AnnouncementReadDelete is the builder for deleting an AnnouncementRead entity.
+type AnnouncementReadDelete struct {
+	config
+	hooks    []Hook
+	mutation *AnnouncementReadMutation
+}
+
+// Where appends a list of predicates to the AnnouncementReadDelete builder.
+func (_d *AnnouncementReadDelete) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *AnnouncementReadDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *AnnouncementReadDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *AnnouncementReadDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// AnnouncementReadDeleteOne is the builder for deleting a single AnnouncementRead entity.
+type AnnouncementReadDeleteOne struct {
+	_d *AnnouncementReadDelete
+}
+
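+// Illustrative usage (editorial sketch, not generated code): removing the read
+// markers of one announcement, e.g. after it is retired. The client, annID,
+// and ctx variables are assumptions.
+//
+//	n, err := client.AnnouncementRead.Delete().
+//		Where(announcementread.AnnouncementID(annID)).
+//		Exec(ctx)
+
+// Where appends a list of predicates to the AnnouncementReadDelete builder.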
+func (_d *AnnouncementReadDeleteOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AnnouncementReadDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{announcementread.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementReadDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcementread_query.go b/backend/ent/announcementread_query.go new file mode 100644 index 00000000..108299fd --- /dev/null +++ b/backend/ent/announcementread_query.go @@ -0,0 +1,718 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementReadQuery is the builder for querying AnnouncementRead entities. +type AnnouncementReadQuery struct { + config + ctx *QueryContext + order []announcementread.OrderOption + inters []Interceptor + predicates []predicate.AnnouncementRead + withAnnouncement *AnnouncementQuery + withUser *UserQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AnnouncementReadQuery builder. +func (_q *AnnouncementReadQuery) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AnnouncementReadQuery) Limit(limit int) *AnnouncementReadQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AnnouncementReadQuery) Offset(offset int) *AnnouncementReadQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AnnouncementReadQuery) Unique(unique bool) *AnnouncementReadQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AnnouncementReadQuery) Order(o ...announcementread.OrderOption) *AnnouncementReadQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAnnouncement chains the current query on the "announcement" edge. 
+func (_q *AnnouncementReadQuery) QueryAnnouncement() *AnnouncementQuery { + query := (&AnnouncementClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, selector), + sqlgraph.To(announcement.Table, announcement.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.AnnouncementTable, announcementread.AnnouncementColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUser chains the current query on the "user" edge. +func (_q *AnnouncementReadQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.UserTable, announcementread.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AnnouncementRead entity from the query. +// Returns a *NotFoundError when no AnnouncementRead was found. +func (_q *AnnouncementReadQuery) First(ctx context.Context) (*AnnouncementRead, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{announcementread.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AnnouncementReadQuery) FirstX(ctx context.Context) *AnnouncementRead { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AnnouncementRead ID from the query. +// Returns a *NotFoundError when no AnnouncementRead ID was found. +func (_q *AnnouncementReadQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{announcementread.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AnnouncementReadQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AnnouncementRead entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AnnouncementRead entity is found. +// Returns a *NotFoundError when no AnnouncementRead entities are found. 
+func (_q *AnnouncementReadQuery) Only(ctx context.Context) (*AnnouncementRead, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{announcementread.Label} + default: + return nil, &NotSingularError{announcementread.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AnnouncementReadQuery) OnlyX(ctx context.Context) *AnnouncementRead { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AnnouncementRead ID in the query. +// Returns a *NotSingularError when more than one AnnouncementRead ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *AnnouncementReadQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{announcementread.Label} + default: + err = &NotSingularError{announcementread.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AnnouncementReadQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AnnouncementReads. +func (_q *AnnouncementReadQuery) All(ctx context.Context) ([]*AnnouncementRead, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AnnouncementRead, *AnnouncementReadQuery]() + return withInterceptors[[]*AnnouncementRead](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AnnouncementReadQuery) AllX(ctx context.Context) []*AnnouncementRead { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AnnouncementRead IDs. +func (_q *AnnouncementReadQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(announcementread.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AnnouncementReadQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AnnouncementReadQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AnnouncementReadQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AnnouncementReadQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *AnnouncementReadQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
+	switch _, err := _q.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (_q *AnnouncementReadQuery) ExistX(ctx context.Context) bool {
+	exist, err := _q.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the AnnouncementReadQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (_q *AnnouncementReadQuery) Clone() *AnnouncementReadQuery {
+	if _q == nil {
+		return nil
+	}
+	return &AnnouncementReadQuery{
+		config:           _q.config,
+		ctx:              _q.ctx.Clone(),
+		order:            append([]announcementread.OrderOption{}, _q.order...),
+		inters:           append([]Interceptor{}, _q.inters...),
+		predicates:       append([]predicate.AnnouncementRead{}, _q.predicates...),
+		withAnnouncement: _q.withAnnouncement.Clone(),
+		withUser:         _q.withUser.Clone(),
+		// clone intermediate query.
+		sql:  _q.sql.Clone(),
+		path: _q.path,
+	}
+}
+
+// WithAnnouncement tells the query-builder to eager-load the nodes that are connected to
+// the "announcement" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *AnnouncementReadQuery) WithAnnouncement(opts ...func(*AnnouncementQuery)) *AnnouncementReadQuery {
+	query := (&AnnouncementClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withAnnouncement = query
+	return _q
+}
+
+// WithUser tells the query-builder to eager-load the nodes that are connected to
+// the "user" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *AnnouncementReadQuery) WithUser(opts ...func(*UserQuery)) *AnnouncementReadQuery {
+	query := (&UserClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withUser = query
+	return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		AnnouncementID int64 `json:"announcement_id,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.AnnouncementRead.Query().
+//		GroupBy(announcementread.FieldAnnouncementID).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *AnnouncementReadQuery) GroupBy(field string, fields ...string) *AnnouncementReadGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &AnnouncementReadGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = announcementread.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		AnnouncementID int64 `json:"announcement_id,omitempty"`
+//	}
+//
+//	client.AnnouncementRead.Query().
+//		Select(announcementread.FieldAnnouncementID).
+//		Scan(ctx, &v)
+func (_q *AnnouncementReadQuery) Select(fields ...string) *AnnouncementReadSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &AnnouncementReadSelect{AnnouncementReadQuery: _q}
+	sbuild.label = announcementread.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns an AnnouncementReadSelect configured with the given aggregations.
+func (_q *AnnouncementReadQuery) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect {
+	return _q.Select().Aggregate(fns...)
+}
+
+func (_q *AnnouncementReadQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range _q.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, _q); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range _q.ctx.Fields {
+		if !announcementread.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if _q.path != nil {
+		prev, err := _q.path(ctx)
+		if err != nil {
+			return err
+		}
+		_q.sql = prev
+	}
+	return nil
+}
+
+func (_q *AnnouncementReadQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AnnouncementRead, error) {
+	var (
+		nodes       = []*AnnouncementRead{}
+		_spec       = _q.querySpec()
+		loadedTypes = [2]bool{
+			_q.withAnnouncement != nil,
+			_q.withUser != nil,
+		}
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*AnnouncementRead).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &AnnouncementRead{config: _q.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := _q.withAnnouncement; query != nil {
+		if err := _q.loadAnnouncement(ctx, query, nodes, nil,
+			func(n *AnnouncementRead, e *Announcement) { n.Edges.Announcement = e }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withUser; query != nil {
+		if err := _q.loadUser(ctx, query, nodes, nil,
+			func(n *AnnouncementRead, e *User) { n.Edges.User = e }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (_q *AnnouncementReadQuery) loadAnnouncement(ctx context.Context, query *AnnouncementQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *Announcement)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*AnnouncementRead)
+	for i := range nodes {
+		fk := nodes[i].AnnouncementID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(announcement.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "announcement_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+func (_q *AnnouncementReadQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *User)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*AnnouncementRead)
+	for i := range nodes {
+		fk := nodes[i].UserID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, 
fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *AnnouncementReadQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AnnouncementReadQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID) + for i := range fields { + if fields[i] != announcementread.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withAnnouncement != nil { + _spec.Node.AddColumnOnce(announcementread.FieldAnnouncementID) + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(announcementread.FieldUserID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AnnouncementReadQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(announcementread.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = announcementread.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *AnnouncementReadQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementReadQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) 
+ }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *AnnouncementReadQuery) ForShare(opts ...sql.LockOption) *AnnouncementReadQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// AnnouncementReadGroupBy is the group-by builder for AnnouncementRead entities. +type AnnouncementReadGroupBy struct { + selector + build *AnnouncementReadQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AnnouncementReadGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementReadGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AnnouncementReadGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AnnouncementReadGroupBy) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AnnouncementReadSelect is the builder for selecting fields of AnnouncementRead entities. +type AnnouncementReadSelect struct { + *AnnouncementReadQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AnnouncementReadSelect) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AnnouncementReadSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadSelect](ctx, _s.AnnouncementReadQuery, _s, _s.inters, v) +} + +func (_s *AnnouncementReadSelect) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/announcementread_update.go b/backend/ent/announcementread_update.go new file mode 100644 index 00000000..55a4eef8 --- /dev/null +++ b/backend/ent/announcementread_update.go @@ -0,0 +1,456 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementReadUpdate is the builder for updating AnnouncementRead entities. +type AnnouncementReadUpdate struct { + config + hooks []Hook + mutation *AnnouncementReadMutation +} + +// Where appends a list predicates to the AnnouncementReadUpdate builder. +func (_u *AnnouncementReadUpdate) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetAnnouncementID sets the "announcement_id" field. +func (_u *AnnouncementReadUpdate) SetAnnouncementID(v int64) *AnnouncementReadUpdate { + _u.mutation.SetAnnouncementID(v) + return _u +} + +// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdate { + if v != nil { + _u.SetAnnouncementID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *AnnouncementReadUpdate) SetUserID(v int64) *AnnouncementReadUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableUserID(v *int64) *AnnouncementReadUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetReadAt sets the "read_at" field. +func (_u *AnnouncementReadUpdate) SetReadAt(v time.Time) *AnnouncementReadUpdate { + _u.mutation.SetReadAt(v) + return _u +} + +// SetNillableReadAt sets the "read_at" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdate { + if v != nil { + _u.SetReadAt(*v) + } + return _u +} + +// SetAnnouncement sets the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdate) SetAnnouncement(v *Announcement) *AnnouncementReadUpdate { + return _u.SetAnnouncementID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *AnnouncementReadUpdate) SetUser(v *User) *AnnouncementReadUpdate { + return _u.SetUserID(v.ID) +} + +// Mutation returns the AnnouncementReadMutation object of the builder. +func (_u *AnnouncementReadUpdate) Mutation() *AnnouncementReadMutation { + return _u.mutation +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdate) ClearAnnouncement() *AnnouncementReadUpdate { + _u.mutation.ClearAnnouncement() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *AnnouncementReadUpdate) ClearUser() *AnnouncementReadUpdate { + _u.mutation.ClearUser() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
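+//
+// A minimal usage sketch (hypothetical; client, ctx, and userID are assumed to
+// exist in the caller's scope):
+//
+//	n, err := client.AnnouncementRead.Update().
+//		Where(announcementread.UserID(userID)).
+//		SetReadAt(time.Now()).
+//		Save(ctx)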
+func (_u *AnnouncementReadUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementReadUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AnnouncementReadUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementReadUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *AnnouncementReadUpdate) check() error { + if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`) + } + return nil +} + +func (_u *AnnouncementReadUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ReadAt(); ok { + _spec.SetField(announcementread.FieldReadAt, field.TypeTime, value) + } + if _u.mutation.AnnouncementCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := 
err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcementread.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AnnouncementReadUpdateOne is the builder for updating a single AnnouncementRead entity. +type AnnouncementReadUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AnnouncementReadMutation +} + +// SetAnnouncementID sets the "announcement_id" field. +func (_u *AnnouncementReadUpdateOne) SetAnnouncementID(v int64) *AnnouncementReadUpdateOne { + _u.mutation.SetAnnouncementID(v) + return _u +} + +// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetAnnouncementID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *AnnouncementReadUpdateOne) SetUserID(v int64) *AnnouncementReadUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableUserID(v *int64) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetReadAt sets the "read_at" field. +func (_u *AnnouncementReadUpdateOne) SetReadAt(v time.Time) *AnnouncementReadUpdateOne { + _u.mutation.SetReadAt(v) + return _u +} + +// SetNillableReadAt sets the "read_at" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetReadAt(*v) + } + return _u +} + +// SetAnnouncement sets the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdateOne) SetAnnouncement(v *Announcement) *AnnouncementReadUpdateOne { + return _u.SetAnnouncementID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *AnnouncementReadUpdateOne) SetUser(v *User) *AnnouncementReadUpdateOne { + return _u.SetUserID(v.ID) +} + +// Mutation returns the AnnouncementReadMutation object of the builder. +func (_u *AnnouncementReadUpdateOne) Mutation() *AnnouncementReadMutation { + return _u.mutation +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdateOne) ClearAnnouncement() *AnnouncementReadUpdateOne { + _u.mutation.ClearAnnouncement() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *AnnouncementReadUpdateOne) ClearUser() *AnnouncementReadUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// Where appends a list predicates to the AnnouncementReadUpdate builder. +func (_u *AnnouncementReadUpdateOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AnnouncementReadUpdateOne) Select(field string, fields ...string) *AnnouncementReadUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated AnnouncementRead entity. +func (_u *AnnouncementReadUpdateOne) Save(ctx context.Context) (*AnnouncementRead, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *AnnouncementReadUpdateOne) SaveX(ctx context.Context) *AnnouncementRead { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AnnouncementReadUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementReadUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *AnnouncementReadUpdateOne) check() error { + if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`) + } + return nil +} + +func (_u *AnnouncementReadUpdateOne) sqlSave(ctx context.Context) (_node *AnnouncementRead, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AnnouncementRead.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID) + for _, f := range fields { + if !announcementread.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != announcementread.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ReadAt(); ok { + _spec.SetField(announcementread.FieldReadAt, field.TypeTime, value) + } + if _u.mutation.AnnouncementCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AnnouncementRead{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcementread.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go index f6c13e84..a17721da 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -17,6 +17,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -46,6 +48,10 @@ type Client struct { Account *AccountClient // AccountGroup is the client for interacting with the AccountGroup builders. AccountGroup *AccountGroupClient + // Announcement is the client for interacting with the Announcement builders. + Announcement *AnnouncementClient + // AnnouncementRead is the client for interacting with the AnnouncementRead builders. + AnnouncementRead *AnnouncementReadClient // Group is the client for interacting with the Group builders. Group *GroupClient // PromoCode is the client for interacting with the PromoCode builders. @@ -86,6 +92,8 @@ func (c *Client) init() { c.APIKey = NewAPIKeyClient(c.config) c.Account = NewAccountClient(c.config) c.AccountGroup = NewAccountGroupClient(c.config) + c.Announcement = NewAnnouncementClient(c.config) + c.AnnouncementRead = NewAnnouncementReadClient(c.config) c.Group = NewGroupClient(c.config) c.PromoCode = NewPromoCodeClient(c.config) c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) @@ -194,6 +202,8 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { APIKey: NewAPIKeyClient(cfg), Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), Group: NewGroupClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), @@ -229,6 +239,8 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) APIKey: NewAPIKeyClient(cfg), Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), Group: NewGroupClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), @@ -271,10 +283,10 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. 
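+// An illustrative sketch (the hook body is hypothetical) of attaching a hook to
+// the Announcement client added in this change:
+//
+//	client.Announcement.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.AnnouncementFunc(func(ctx context.Context, m *ent.AnnouncementMutation) (ent.Value, error) {
+//			// e.g. validate or log the mutation before executing it
+//			return next.Mutate(ctx, m)
+//		})
+//	})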
func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User, - c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, + c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) } @@ -284,10 +296,10 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageCleanupTask, c.UsageLog, c.User, - c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, + c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) } @@ -302,6 +314,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Account.mutate(ctx, m) case *AccountGroupMutation: return c.AccountGroup.mutate(ctx, m) + case *AnnouncementMutation: + return c.Announcement.mutate(ctx, m) + case *AnnouncementReadMutation: + return c.AnnouncementRead.mutate(ctx, m) case *GroupMutation: return c.Group.mutate(ctx, m) case *PromoCodeMutation: @@ -831,6 +847,320 @@ func (c *AccountGroupClient) mutate(ctx context.Context, m *AccountGroupMutation } } +// AnnouncementClient is a client for the Announcement schema. +type AnnouncementClient struct { + config +} + +// NewAnnouncementClient returns a client for the Announcement from the given config. +func NewAnnouncementClient(c config) *AnnouncementClient { + return &AnnouncementClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `announcement.Hooks(f(g(h())))`. +func (c *AnnouncementClient) Use(hooks ...Hook) { + c.hooks.Announcement = append(c.hooks.Announcement, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `announcement.Intercept(f(g(h())))`. +func (c *AnnouncementClient) Intercept(interceptors ...Interceptor) { + c.inters.Announcement = append(c.inters.Announcement, interceptors...) +} + +// Create returns a builder for creating a Announcement entity. +func (c *AnnouncementClient) Create() *AnnouncementCreate { + mutation := newAnnouncementMutation(c.config, OpCreate) + return &AnnouncementCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Announcement entities. +func (c *AnnouncementClient) CreateBulk(builders ...*AnnouncementCreate) *AnnouncementCreateBulk { + return &AnnouncementCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
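+//
+// For example (a hypothetical sketch; the titles slice is a placeholder):
+//
+//	titles := []string{"maintenance window", "new release"}
+//	client.Announcement.MapCreateBulk(titles, func(c *AnnouncementCreate, i int) {
+//		c.SetTitle(titles[i])
+//	})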
+func (c *AnnouncementClient) MapCreateBulk(slice any, setFunc func(*AnnouncementCreate, int)) *AnnouncementCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AnnouncementCreateBulk{err: fmt.Errorf("calling to AnnouncementClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AnnouncementCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AnnouncementCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Announcement. +func (c *AnnouncementClient) Update() *AnnouncementUpdate { + mutation := newAnnouncementMutation(c.config, OpUpdate) + return &AnnouncementUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AnnouncementClient) UpdateOne(_m *Announcement) *AnnouncementUpdateOne { + mutation := newAnnouncementMutation(c.config, OpUpdateOne, withAnnouncement(_m)) + return &AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AnnouncementClient) UpdateOneID(id int64) *AnnouncementUpdateOne { + mutation := newAnnouncementMutation(c.config, OpUpdateOne, withAnnouncementID(id)) + return &AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Announcement. +func (c *AnnouncementClient) Delete() *AnnouncementDelete { + mutation := newAnnouncementMutation(c.config, OpDelete) + return &AnnouncementDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AnnouncementClient) DeleteOne(_m *Announcement) *AnnouncementDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AnnouncementClient) DeleteOneID(id int64) *AnnouncementDeleteOne { + builder := c.Delete().Where(announcement.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AnnouncementDeleteOne{builder} +} + +// Query returns a query builder for Announcement. +func (c *AnnouncementClient) Query() *AnnouncementQuery { + return &AnnouncementQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAnnouncement}, + inters: c.Interceptors(), + } +} + +// Get returns a Announcement entity by its id. +func (c *AnnouncementClient) Get(ctx context.Context, id int64) (*Announcement, error) { + return c.Query().Where(announcement.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AnnouncementClient) GetX(ctx context.Context, id int64) *Announcement { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryReads queries the reads edge of a Announcement. +func (c *AnnouncementClient) QueryReads(_m *Announcement) *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcement.Table, announcement.FieldID, id), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, announcement.ReadsTable, announcement.ReadsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *AnnouncementClient) Hooks() []Hook { + return c.hooks.Announcement +} + +// Interceptors returns the client interceptors. +func (c *AnnouncementClient) Interceptors() []Interceptor { + return c.inters.Announcement +} + +func (c *AnnouncementClient) mutate(ctx context.Context, m *AnnouncementMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AnnouncementCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AnnouncementUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AnnouncementDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Announcement mutation op: %q", m.Op()) + } +} + +// AnnouncementReadClient is a client for the AnnouncementRead schema. +type AnnouncementReadClient struct { + config +} + +// NewAnnouncementReadClient returns a client for the AnnouncementRead from the given config. +func NewAnnouncementReadClient(c config) *AnnouncementReadClient { + return &AnnouncementReadClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `announcementread.Hooks(f(g(h())))`. +func (c *AnnouncementReadClient) Use(hooks ...Hook) { + c.hooks.AnnouncementRead = append(c.hooks.AnnouncementRead, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `announcementread.Intercept(f(g(h())))`. +func (c *AnnouncementReadClient) Intercept(interceptors ...Interceptor) { + c.inters.AnnouncementRead = append(c.inters.AnnouncementRead, interceptors...) +} + +// Create returns a builder for creating a AnnouncementRead entity. +func (c *AnnouncementReadClient) Create() *AnnouncementReadCreate { + mutation := newAnnouncementReadMutation(c.config, OpCreate) + return &AnnouncementReadCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AnnouncementRead entities. +func (c *AnnouncementReadClient) CreateBulk(builders ...*AnnouncementReadCreate) *AnnouncementReadCreateBulk { + return &AnnouncementReadCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AnnouncementReadClient) MapCreateBulk(slice any, setFunc func(*AnnouncementReadCreate, int)) *AnnouncementReadCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AnnouncementReadCreateBulk{err: fmt.Errorf("calling to AnnouncementReadClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AnnouncementReadCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AnnouncementReadCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AnnouncementRead. +func (c *AnnouncementReadClient) Update() *AnnouncementReadUpdate { + mutation := newAnnouncementReadMutation(c.config, OpUpdate) + return &AnnouncementReadUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
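+//
+// For example (illustrative; ar is assumed to be an existing *AnnouncementRead):
+//
+//	updated, err := client.AnnouncementRead.UpdateOne(ar).
+//		SetReadAt(time.Now()).
+//		Save(ctx)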
+func (c *AnnouncementReadClient) UpdateOne(_m *AnnouncementRead) *AnnouncementReadUpdateOne { + mutation := newAnnouncementReadMutation(c.config, OpUpdateOne, withAnnouncementRead(_m)) + return &AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AnnouncementReadClient) UpdateOneID(id int64) *AnnouncementReadUpdateOne { + mutation := newAnnouncementReadMutation(c.config, OpUpdateOne, withAnnouncementReadID(id)) + return &AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AnnouncementRead. +func (c *AnnouncementReadClient) Delete() *AnnouncementReadDelete { + mutation := newAnnouncementReadMutation(c.config, OpDelete) + return &AnnouncementReadDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AnnouncementReadClient) DeleteOne(_m *AnnouncementRead) *AnnouncementReadDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AnnouncementReadClient) DeleteOneID(id int64) *AnnouncementReadDeleteOne { + builder := c.Delete().Where(announcementread.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AnnouncementReadDeleteOne{builder} +} + +// Query returns a query builder for AnnouncementRead. +func (c *AnnouncementReadClient) Query() *AnnouncementReadQuery { + return &AnnouncementReadQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAnnouncementRead}, + inters: c.Interceptors(), + } +} + +// Get returns a AnnouncementRead entity by its id. +func (c *AnnouncementReadClient) Get(ctx context.Context, id int64) (*AnnouncementRead, error) { + return c.Query().Where(announcementread.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AnnouncementReadClient) GetX(ctx context.Context, id int64) *AnnouncementRead { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAnnouncement queries the announcement edge of a AnnouncementRead. +func (c *AnnouncementReadClient) QueryAnnouncement(_m *AnnouncementRead) *AnnouncementQuery { + query := (&AnnouncementClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, id), + sqlgraph.To(announcement.Table, announcement.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.AnnouncementTable, announcementread.AnnouncementColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUser queries the user edge of a AnnouncementRead. +func (c *AnnouncementReadClient) QueryUser(_m *AnnouncementRead) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.UserTable, announcementread.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *AnnouncementReadClient) Hooks() []Hook { + return c.hooks.AnnouncementRead +} + +// Interceptors returns the client interceptors. +func (c *AnnouncementReadClient) Interceptors() []Interceptor { + return c.inters.AnnouncementRead +} + +func (c *AnnouncementReadClient) mutate(ctx context.Context, m *AnnouncementReadMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AnnouncementReadCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AnnouncementReadUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AnnouncementReadDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AnnouncementRead mutation op: %q", m.Op()) + } +} + // GroupClient is a client for the Group schema. type GroupClient struct { config @@ -2375,6 +2705,22 @@ func (c *UserClient) QueryAssignedSubscriptions(_m *User) *UserSubscriptionQuery return query } +// QueryAnnouncementReads queries the announcement_reads edge of a User. +func (c *UserClient) QueryAnnouncementReads(_m *User) *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AnnouncementReadsTable, user.AnnouncementReadsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryAllowedGroups queries the allowed_groups edge of a User. func (c *UserClient) QueryAllowedGroups(_m *User) *GroupQuery { query := (&GroupClient{config: c.config}).Query() @@ -3116,14 +3462,16 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription // hooks and interceptors per client, for fast access. 
type ( hooks struct { - APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook + APIKey, Account, AccountGroup, Announcement, AnnouncementRead, Group, PromoCode, + PromoCodeUsage, Proxy, RedeemCode, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { - APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor + APIKey, Account, AccountGroup, Announcement, AnnouncementRead, Group, PromoCode, + PromoCodeUsage, Proxy, RedeemCode, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 4bcc2642..05e30ba7 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -14,6 +14,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -91,6 +93,8 @@ func checkColumn(t, c string) error { apikey.Table: apikey.ValidColumn, account.Table: account.ValidColumn, accountgroup.Table: accountgroup.ValidColumn, + announcement.Table: announcement.ValidColumn, + announcementread.Table: announcementread.ValidColumn, group.Table: group.ValidColumn, promocode.Table: promocode.ValidColumn, promocodeusage.Table: promocodeusage.ValidColumn, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index edd84f5e..1e653c77 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -45,6 +45,30 @@ func (f AccountGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountGroupMutation", m) } +// The AnnouncementFunc type is an adapter to allow the use of ordinary +// function as Announcement mutator. +type AnnouncementFunc func(context.Context, *ent.AnnouncementMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AnnouncementFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AnnouncementMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnnouncementMutation", m) +} + +// The AnnouncementReadFunc type is an adapter to allow the use of ordinary +// function as AnnouncementRead mutator. +type AnnouncementReadFunc func(context.Context, *ent.AnnouncementReadMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AnnouncementReadFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AnnouncementReadMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnnouncementReadMutation", m) +} + // The GroupFunc type is an adapter to allow the use of ordinary // function as Group mutator. 
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index f18c0624..a37be48f 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -10,6 +10,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -164,6 +166,60 @@ func (f TraverseAccountGroup) Traverse(ctx context.Context, q ent.Query) error { return fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) } +// The AnnouncementFunc type is an adapter to allow the use of ordinary function as a Querier. +type AnnouncementFunc func(context.Context, *ent.AnnouncementQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AnnouncementFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AnnouncementQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementQuery", q) +} + +// The TraverseAnnouncement type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAnnouncement func(context.Context, *ent.AnnouncementQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAnnouncement) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAnnouncement) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AnnouncementQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementQuery", q) +} + +// The AnnouncementReadFunc type is an adapter to allow the use of ordinary function as a Querier. +type AnnouncementReadFunc func(context.Context, *ent.AnnouncementReadQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AnnouncementReadFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AnnouncementReadQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementReadQuery", q) +} + +// The TraverseAnnouncementRead type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAnnouncementRead func(context.Context, *ent.AnnouncementReadQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAnnouncementRead) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAnnouncementRead) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AnnouncementReadQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementReadQuery", q) +} + // The GroupFunc type is an adapter to allow the use of ordinary function as a Querier. 
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error) @@ -524,6 +580,10 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.AccountQuery, predicate.Account, account.OrderOption]{typ: ent.TypeAccount, tq: q}, nil case *ent.AccountGroupQuery: return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil + case *ent.AnnouncementQuery: + return &query[*ent.AnnouncementQuery, predicate.Announcement, announcement.OrderOption]{typ: ent.TypeAnnouncement, tq: q}, nil + case *ent.AnnouncementReadQuery: + return &query[*ent.AnnouncementReadQuery, predicate.AnnouncementRead, announcementread.OrderOption]{typ: ent.TypeAnnouncementRead, tq: q}, nil case *ent.GroupQuery: return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil case *ent.PromoCodeQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index d2a39331..e2ed7340 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -204,6 +204,98 @@ var ( }, }, } + // AnnouncementsColumns holds the columns for the "announcements" table. + AnnouncementsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "title", Type: field.TypeString, Size: 200}, + {Name: "content", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "draft"}, + {Name: "targeting", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "starts_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "ends_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "created_by", Type: field.TypeInt64, Nullable: true}, + {Name: "updated_by", Type: field.TypeInt64, Nullable: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // AnnouncementsTable holds the schema information for the "announcements" table. + AnnouncementsTable = &schema.Table{ + Name: "announcements", + Columns: AnnouncementsColumns, + PrimaryKey: []*schema.Column{AnnouncementsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "announcement_status", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[3]}, + }, + { + Name: "announcement_created_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[9]}, + }, + { + Name: "announcement_starts_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[5]}, + }, + { + Name: "announcement_ends_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[6]}, + }, + }, + } + // AnnouncementReadsColumns holds the columns for the "announcement_reads" table. + AnnouncementReadsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "read_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "announcement_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + } + // AnnouncementReadsTable holds the schema information for the "announcement_reads" table. 
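+	// Note: the unique index on (announcement_id, user_id) declared below ensures
+	// at most one read record per user per announcement.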
+ AnnouncementReadsTable = &schema.Table{ + Name: "announcement_reads", + Columns: AnnouncementReadsColumns, + PrimaryKey: []*schema.Column{AnnouncementReadsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "announcement_reads_announcements_reads", + Columns: []*schema.Column{AnnouncementReadsColumns[3]}, + RefColumns: []*schema.Column{AnnouncementsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "announcement_reads_users_announcement_reads", + Columns: []*schema.Column{AnnouncementReadsColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "announcementread_announcement_id", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[3]}, + }, + { + Name: "announcementread_user_id", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[4]}, + }, + { + Name: "announcementread_read_at", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[1]}, + }, + { + Name: "announcementread_announcement_id_user_id", + Unique: true, + Columns: []*schema.Column{AnnouncementReadsColumns[3], AnnouncementReadsColumns[4]}, + }, + }, + } // GroupsColumns holds the columns for the "groups" table. GroupsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -840,6 +932,8 @@ var ( APIKeysTable, AccountsTable, AccountGroupsTable, + AnnouncementsTable, + AnnouncementReadsTable, GroupsTable, PromoCodesTable, PromoCodeUsagesTable, @@ -871,6 +965,14 @@ func init() { AccountGroupsTable.Annotation = &entsql.Annotation{ Table: "account_groups", } + AnnouncementsTable.Annotation = &entsql.Annotation{ + Table: "announcements", + } + AnnouncementReadsTable.ForeignKeys[0].RefTable = AnnouncementsTable + AnnouncementReadsTable.ForeignKeys[1].RefTable = UsersTable + AnnouncementReadsTable.Annotation = &entsql.Annotation{ + Table: "announcement_reads", + } GroupsTable.Annotation = &entsql.Annotation{ Table: "groups", } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 7f3071c2..38e0c7e5 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -14,6 +14,8 @@ import ( "entgo.io/ent/dialect/sql" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -29,6 +31,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" + "github.com/Wei-Shaw/sub2api/internal/domain" ) const ( @@ -43,6 +46,8 @@ const ( TypeAPIKey = "APIKey" TypeAccount = "Account" TypeAccountGroup = "AccountGroup" + TypeAnnouncement = "Announcement" + TypeAnnouncementRead = "AnnouncementRead" TypeGroup = "Group" TypePromoCode = "PromoCode" TypePromoCodeUsage = "PromoCodeUsage" @@ -3833,6 +3838,1671 @@ func (m *AccountGroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AccountGroup edge %s", name) } +// AnnouncementMutation represents an operation that mutates the Announcement nodes in the graph. 
+type AnnouncementMutation struct { + config + op Op + typ string + id *int64 + title *string + content *string + status *string + targeting *domain.AnnouncementTargeting + starts_at *time.Time + ends_at *time.Time + created_by *int64 + addcreated_by *int64 + updated_by *int64 + addupdated_by *int64 + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + reads map[int64]struct{} + removedreads map[int64]struct{} + clearedreads bool + done bool + oldValue func(context.Context) (*Announcement, error) + predicates []predicate.Announcement +} + +var _ ent.Mutation = (*AnnouncementMutation)(nil) + +// announcementOption allows management of the mutation configuration using functional options. +type announcementOption func(*AnnouncementMutation) + +// newAnnouncementMutation creates new mutation for the Announcement entity. +func newAnnouncementMutation(c config, op Op, opts ...announcementOption) *AnnouncementMutation { + m := &AnnouncementMutation{ + config: c, + op: op, + typ: TypeAnnouncement, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAnnouncementID sets the ID field of the mutation. +func withAnnouncementID(id int64) announcementOption { + return func(m *AnnouncementMutation) { + var ( + err error + once sync.Once + value *Announcement + ) + m.oldValue = func(ctx context.Context) (*Announcement, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Announcement.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAnnouncement sets the old Announcement of the mutation. +func withAnnouncement(node *Announcement) announcementOption { + return func(m *AnnouncementMutation) { + m.oldValue = func(context.Context) (*Announcement, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AnnouncementMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AnnouncementMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AnnouncementMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *AnnouncementMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Announcement.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetTitle sets the "title" field. +func (m *AnnouncementMutation) SetTitle(s string) { + m.title = &s +} + +// Title returns the value of the "title" field in the mutation. +func (m *AnnouncementMutation) Title() (r string, exists bool) { + v := m.title + if v == nil { + return + } + return *v, true +} + +// OldTitle returns the old "title" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldTitle(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTitle is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTitle requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTitle: %w", err) + } + return oldValue.Title, nil +} + +// ResetTitle resets all changes to the "title" field. +func (m *AnnouncementMutation) ResetTitle() { + m.title = nil +} + +// SetContent sets the "content" field. +func (m *AnnouncementMutation) SetContent(s string) { + m.content = &s +} + +// Content returns the value of the "content" field in the mutation. +func (m *AnnouncementMutation) Content() (r string, exists bool) { + v := m.content + if v == nil { + return + } + return *v, true +} + +// OldContent returns the old "content" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldContent(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldContent is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldContent requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldContent: %w", err) + } + return oldValue.Content, nil +} + +// ResetContent resets all changes to the "content" field. +func (m *AnnouncementMutation) ResetContent() { + m.content = nil +} + +// SetStatus sets the "status" field. +func (m *AnnouncementMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *AnnouncementMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *AnnouncementMutation) ResetStatus() { + m.status = nil +} + +// SetTargeting sets the "targeting" field. +func (m *AnnouncementMutation) SetTargeting(dt domain.AnnouncementTargeting) { + m.targeting = &dt +} + +// Targeting returns the value of the "targeting" field in the mutation. +func (m *AnnouncementMutation) Targeting() (r domain.AnnouncementTargeting, exists bool) { + v := m.targeting + if v == nil { + return + } + return *v, true +} + +// OldTargeting returns the old "targeting" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldTargeting(ctx context.Context) (v domain.AnnouncementTargeting, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTargeting is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTargeting requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTargeting: %w", err) + } + return oldValue.Targeting, nil +} + +// ClearTargeting clears the value of the "targeting" field. +func (m *AnnouncementMutation) ClearTargeting() { + m.targeting = nil + m.clearedFields[announcement.FieldTargeting] = struct{}{} +} + +// TargetingCleared returns if the "targeting" field was cleared in this mutation. +func (m *AnnouncementMutation) TargetingCleared() bool { + _, ok := m.clearedFields[announcement.FieldTargeting] + return ok +} + +// ResetTargeting resets all changes to the "targeting" field. +func (m *AnnouncementMutation) ResetTargeting() { + m.targeting = nil + delete(m.clearedFields, announcement.FieldTargeting) +} + +// SetStartsAt sets the "starts_at" field. +func (m *AnnouncementMutation) SetStartsAt(t time.Time) { + m.starts_at = &t +} + +// StartsAt returns the value of the "starts_at" field in the mutation. +func (m *AnnouncementMutation) StartsAt() (r time.Time, exists bool) { + v := m.starts_at + if v == nil { + return + } + return *v, true +} + +// OldStartsAt returns the old "starts_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementMutation) OldStartsAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartsAt: %w", err) + } + return oldValue.StartsAt, nil +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (m *AnnouncementMutation) ClearStartsAt() { + m.starts_at = nil + m.clearedFields[announcement.FieldStartsAt] = struct{}{} +} + +// StartsAtCleared returns if the "starts_at" field was cleared in this mutation. +func (m *AnnouncementMutation) StartsAtCleared() bool { + _, ok := m.clearedFields[announcement.FieldStartsAt] + return ok +} + +// ResetStartsAt resets all changes to the "starts_at" field. +func (m *AnnouncementMutation) ResetStartsAt() { + m.starts_at = nil + delete(m.clearedFields, announcement.FieldStartsAt) +} + +// SetEndsAt sets the "ends_at" field. +func (m *AnnouncementMutation) SetEndsAt(t time.Time) { + m.ends_at = &t +} + +// EndsAt returns the value of the "ends_at" field in the mutation. +func (m *AnnouncementMutation) EndsAt() (r time.Time, exists bool) { + v := m.ends_at + if v == nil { + return + } + return *v, true +} + +// OldEndsAt returns the old "ends_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldEndsAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndsAt: %w", err) + } + return oldValue.EndsAt, nil +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (m *AnnouncementMutation) ClearEndsAt() { + m.ends_at = nil + m.clearedFields[announcement.FieldEndsAt] = struct{}{} +} + +// EndsAtCleared returns if the "ends_at" field was cleared in this mutation. +func (m *AnnouncementMutation) EndsAtCleared() bool { + _, ok := m.clearedFields[announcement.FieldEndsAt] + return ok +} + +// ResetEndsAt resets all changes to the "ends_at" field. +func (m *AnnouncementMutation) ResetEndsAt() { + m.ends_at = nil + delete(m.clearedFields, announcement.FieldEndsAt) +} + +// SetCreatedBy sets the "created_by" field. +func (m *AnnouncementMutation) SetCreatedBy(i int64) { + m.created_by = &i + m.addcreated_by = nil +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *AnnouncementMutation) CreatedBy() (r int64, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementMutation) OldCreatedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// AddCreatedBy adds i to the "created_by" field. +func (m *AnnouncementMutation) AddCreatedBy(i int64) { + if m.addcreated_by != nil { + *m.addcreated_by += i + } else { + m.addcreated_by = &i + } +} + +// AddedCreatedBy returns the value that was added to the "created_by" field in this mutation. +func (m *AnnouncementMutation) AddedCreatedBy() (r int64, exists bool) { + v := m.addcreated_by + if v == nil { + return + } + return *v, true +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (m *AnnouncementMutation) ClearCreatedBy() { + m.created_by = nil + m.addcreated_by = nil + m.clearedFields[announcement.FieldCreatedBy] = struct{}{} +} + +// CreatedByCleared returns if the "created_by" field was cleared in this mutation. +func (m *AnnouncementMutation) CreatedByCleared() bool { + _, ok := m.clearedFields[announcement.FieldCreatedBy] + return ok +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *AnnouncementMutation) ResetCreatedBy() { + m.created_by = nil + m.addcreated_by = nil + delete(m.clearedFields, announcement.FieldCreatedBy) +} + +// SetUpdatedBy sets the "updated_by" field. +func (m *AnnouncementMutation) SetUpdatedBy(i int64) { + m.updated_by = &i + m.addupdated_by = nil +} + +// UpdatedBy returns the value of the "updated_by" field in the mutation. +func (m *AnnouncementMutation) UpdatedBy() (r int64, exists bool) { + v := m.updated_by + if v == nil { + return + } + return *v, true +} + +// OldUpdatedBy returns the old "updated_by" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldUpdatedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedBy: %w", err) + } + return oldValue.UpdatedBy, nil +} + +// AddUpdatedBy adds i to the "updated_by" field. +func (m *AnnouncementMutation) AddUpdatedBy(i int64) { + if m.addupdated_by != nil { + *m.addupdated_by += i + } else { + m.addupdated_by = &i + } +} + +// AddedUpdatedBy returns the value that was added to the "updated_by" field in this mutation. +func (m *AnnouncementMutation) AddedUpdatedBy() (r int64, exists bool) { + v := m.addupdated_by + if v == nil { + return + } + return *v, true +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (m *AnnouncementMutation) ClearUpdatedBy() { + m.updated_by = nil + m.addupdated_by = nil + m.clearedFields[announcement.FieldUpdatedBy] = struct{}{} +} + +// UpdatedByCleared returns if the "updated_by" field was cleared in this mutation. 
+func (m *AnnouncementMutation) UpdatedByCleared() bool { + _, ok := m.clearedFields[announcement.FieldUpdatedBy] + return ok +} + +// ResetUpdatedBy resets all changes to the "updated_by" field. +func (m *AnnouncementMutation) ResetUpdatedBy() { + m.updated_by = nil + m.addupdated_by = nil + delete(m.clearedFields, announcement.FieldUpdatedBy) +} + +// SetCreatedAt sets the "created_at" field. +func (m *AnnouncementMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AnnouncementMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AnnouncementMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AnnouncementMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AnnouncementMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AnnouncementMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by ids. +func (m *AnnouncementMutation) AddReadIDs(ids ...int64) { + if m.reads == nil { + m.reads = make(map[int64]struct{}) + } + for i := range ids { + m.reads[ids[i]] = struct{}{} + } +} + +// ClearReads clears the "reads" edge to the AnnouncementRead entity. +func (m *AnnouncementMutation) ClearReads() { + m.clearedreads = true +} + +// ReadsCleared reports if the "reads" edge to the AnnouncementRead entity was cleared. 
+func (m *AnnouncementMutation) ReadsCleared() bool { + return m.clearedreads +} + +// RemoveReadIDs removes the "reads" edge to the AnnouncementRead entity by IDs. +func (m *AnnouncementMutation) RemoveReadIDs(ids ...int64) { + if m.removedreads == nil { + m.removedreads = make(map[int64]struct{}) + } + for i := range ids { + delete(m.reads, ids[i]) + m.removedreads[ids[i]] = struct{}{} + } +} + +// RemovedReads returns the removed IDs of the "reads" edge to the AnnouncementRead entity. +func (m *AnnouncementMutation) RemovedReadsIDs() (ids []int64) { + for id := range m.removedreads { + ids = append(ids, id) + } + return +} + +// ReadsIDs returns the "reads" edge IDs in the mutation. +func (m *AnnouncementMutation) ReadsIDs() (ids []int64) { + for id := range m.reads { + ids = append(ids, id) + } + return +} + +// ResetReads resets all changes to the "reads" edge. +func (m *AnnouncementMutation) ResetReads() { + m.reads = nil + m.clearedreads = false + m.removedreads = nil +} + +// Where appends a list predicates to the AnnouncementMutation builder. +func (m *AnnouncementMutation) Where(ps ...predicate.Announcement) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AnnouncementMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AnnouncementMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Announcement, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AnnouncementMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AnnouncementMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Announcement). +func (m *AnnouncementMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AnnouncementMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.title != nil { + fields = append(fields, announcement.FieldTitle) + } + if m.content != nil { + fields = append(fields, announcement.FieldContent) + } + if m.status != nil { + fields = append(fields, announcement.FieldStatus) + } + if m.targeting != nil { + fields = append(fields, announcement.FieldTargeting) + } + if m.starts_at != nil { + fields = append(fields, announcement.FieldStartsAt) + } + if m.ends_at != nil { + fields = append(fields, announcement.FieldEndsAt) + } + if m.created_by != nil { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.updated_by != nil { + fields = append(fields, announcement.FieldUpdatedBy) + } + if m.created_at != nil { + fields = append(fields, announcement.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, announcement.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *AnnouncementMutation) Field(name string) (ent.Value, bool) { + switch name { + case announcement.FieldTitle: + return m.Title() + case announcement.FieldContent: + return m.Content() + case announcement.FieldStatus: + return m.Status() + case announcement.FieldTargeting: + return m.Targeting() + case announcement.FieldStartsAt: + return m.StartsAt() + case announcement.FieldEndsAt: + return m.EndsAt() + case announcement.FieldCreatedBy: + return m.CreatedBy() + case announcement.FieldUpdatedBy: + return m.UpdatedBy() + case announcement.FieldCreatedAt: + return m.CreatedAt() + case announcement.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AnnouncementMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case announcement.FieldTitle: + return m.OldTitle(ctx) + case announcement.FieldContent: + return m.OldContent(ctx) + case announcement.FieldStatus: + return m.OldStatus(ctx) + case announcement.FieldTargeting: + return m.OldTargeting(ctx) + case announcement.FieldStartsAt: + return m.OldStartsAt(ctx) + case announcement.FieldEndsAt: + return m.OldEndsAt(ctx) + case announcement.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case announcement.FieldUpdatedBy: + return m.OldUpdatedBy(ctx) + case announcement.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case announcement.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Announcement field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AnnouncementMutation) SetField(name string, value ent.Value) error { + switch name { + case announcement.FieldTitle: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTitle(v) + return nil + case announcement.FieldContent: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetContent(v) + return nil + case announcement.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case announcement.FieldTargeting: + v, ok := value.(domain.AnnouncementTargeting) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTargeting(v) + return nil + case announcement.FieldStartsAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartsAt(v) + return nil + case announcement.FieldEndsAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndsAt(v) + return nil + case announcement.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case announcement.FieldUpdatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedBy(v) + return nil + case announcement.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case announcement.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Announcement field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AnnouncementMutation) AddedFields() []string { + var fields []string + if m.addcreated_by != nil { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.addupdated_by != nil { + fields = append(fields, announcement.FieldUpdatedBy) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AnnouncementMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case announcement.FieldCreatedBy: + return m.AddedCreatedBy() + case announcement.FieldUpdatedBy: + return m.AddedUpdatedBy() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AnnouncementMutation) AddField(name string, value ent.Value) error { + switch name { + case announcement.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCreatedBy(v) + return nil + case announcement.FieldUpdatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddUpdatedBy(v) + return nil + } + return fmt.Errorf("unknown Announcement numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AnnouncementMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(announcement.FieldTargeting) { + fields = append(fields, announcement.FieldTargeting) + } + if m.FieldCleared(announcement.FieldStartsAt) { + fields = append(fields, announcement.FieldStartsAt) + } + if m.FieldCleared(announcement.FieldEndsAt) { + fields = append(fields, announcement.FieldEndsAt) + } + if m.FieldCleared(announcement.FieldCreatedBy) { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.FieldCleared(announcement.FieldUpdatedBy) { + fields = append(fields, announcement.FieldUpdatedBy) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AnnouncementMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AnnouncementMutation) ClearField(name string) error { + switch name { + case announcement.FieldTargeting: + m.ClearTargeting() + return nil + case announcement.FieldStartsAt: + m.ClearStartsAt() + return nil + case announcement.FieldEndsAt: + m.ClearEndsAt() + return nil + case announcement.FieldCreatedBy: + m.ClearCreatedBy() + return nil + case announcement.FieldUpdatedBy: + m.ClearUpdatedBy() + return nil + } + return fmt.Errorf("unknown Announcement nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AnnouncementMutation) ResetField(name string) error { + switch name { + case announcement.FieldTitle: + m.ResetTitle() + return nil + case announcement.FieldContent: + m.ResetContent() + return nil + case announcement.FieldStatus: + m.ResetStatus() + return nil + case announcement.FieldTargeting: + m.ResetTargeting() + return nil + case announcement.FieldStartsAt: + m.ResetStartsAt() + return nil + case announcement.FieldEndsAt: + m.ResetEndsAt() + return nil + case announcement.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case announcement.FieldUpdatedBy: + m.ResetUpdatedBy() + return nil + case announcement.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case announcement.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Announcement field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AnnouncementMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.reads != nil { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *AnnouncementMutation) AddedIDs(name string) []ent.Value { + switch name { + case announcement.EdgeReads: + ids := make([]ent.Value, 0, len(m.reads)) + for id := range m.reads { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AnnouncementMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedreads != nil { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AnnouncementMutation) RemovedIDs(name string) []ent.Value { + switch name { + case announcement.EdgeReads: + ids := make([]ent.Value, 0, len(m.removedreads)) + for id := range m.removedreads { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AnnouncementMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedreads { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AnnouncementMutation) EdgeCleared(name string) bool { + switch name { + case announcement.EdgeReads: + return m.clearedreads + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AnnouncementMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Announcement unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AnnouncementMutation) ResetEdge(name string) error { + switch name { + case announcement.EdgeReads: + m.ResetReads() + return nil + } + return fmt.Errorf("unknown Announcement edge %s", name) +} + +// AnnouncementReadMutation represents an operation that mutates the AnnouncementRead nodes in the graph. +type AnnouncementReadMutation struct { + config + op Op + typ string + id *int64 + read_at *time.Time + created_at *time.Time + clearedFields map[string]struct{} + announcement *int64 + clearedannouncement bool + user *int64 + cleareduser bool + done bool + oldValue func(context.Context) (*AnnouncementRead, error) + predicates []predicate.AnnouncementRead +} + +var _ ent.Mutation = (*AnnouncementReadMutation)(nil) + +// announcementreadOption allows management of the mutation configuration using functional options. +type announcementreadOption func(*AnnouncementReadMutation) + +// newAnnouncementReadMutation creates new mutation for the AnnouncementRead entity. +func newAnnouncementReadMutation(c config, op Op, opts ...announcementreadOption) *AnnouncementReadMutation { + m := &AnnouncementReadMutation{ + config: c, + op: op, + typ: TypeAnnouncementRead, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAnnouncementReadID sets the ID field of the mutation. 
+func withAnnouncementReadID(id int64) announcementreadOption { + return func(m *AnnouncementReadMutation) { + var ( + err error + once sync.Once + value *AnnouncementRead + ) + m.oldValue = func(ctx context.Context) (*AnnouncementRead, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().AnnouncementRead.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAnnouncementRead sets the old AnnouncementRead of the mutation. +func withAnnouncementRead(node *AnnouncementRead) announcementreadOption { + return func(m *AnnouncementReadMutation) { + m.oldValue = func(context.Context) (*AnnouncementRead, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AnnouncementReadMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AnnouncementReadMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AnnouncementReadMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AnnouncementReadMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().AnnouncementRead.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetAnnouncementID sets the "announcement_id" field. +func (m *AnnouncementReadMutation) SetAnnouncementID(i int64) { + m.announcement = &i +} + +// AnnouncementID returns the value of the "announcement_id" field in the mutation. +func (m *AnnouncementReadMutation) AnnouncementID() (r int64, exists bool) { + v := m.announcement + if v == nil { + return + } + return *v, true +} + +// OldAnnouncementID returns the old "announcement_id" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementReadMutation) OldAnnouncementID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAnnouncementID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAnnouncementID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAnnouncementID: %w", err) + } + return oldValue.AnnouncementID, nil +} + +// ResetAnnouncementID resets all changes to the "announcement_id" field. +func (m *AnnouncementReadMutation) ResetAnnouncementID() { + m.announcement = nil +} + +// SetUserID sets the "user_id" field. +func (m *AnnouncementReadMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *AnnouncementReadMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *AnnouncementReadMutation) ResetUserID() { + m.user = nil +} + +// SetReadAt sets the "read_at" field. +func (m *AnnouncementReadMutation) SetReadAt(t time.Time) { + m.read_at = &t +} + +// ReadAt returns the value of the "read_at" field in the mutation. +func (m *AnnouncementReadMutation) ReadAt() (r time.Time, exists bool) { + v := m.read_at + if v == nil { + return + } + return *v, true +} + +// OldReadAt returns the old "read_at" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldReadAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldReadAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldReadAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldReadAt: %w", err) + } + return oldValue.ReadAt, nil +} + +// ResetReadAt resets all changes to the "read_at" field. +func (m *AnnouncementReadMutation) ResetReadAt() { + m.read_at = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *AnnouncementReadMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *AnnouncementReadMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AnnouncementReadMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (m *AnnouncementReadMutation) ClearAnnouncement() { + m.clearedannouncement = true + m.clearedFields[announcementread.FieldAnnouncementID] = struct{}{} +} + +// AnnouncementCleared reports if the "announcement" edge to the Announcement entity was cleared. +func (m *AnnouncementReadMutation) AnnouncementCleared() bool { + return m.clearedannouncement +} + +// AnnouncementIDs returns the "announcement" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AnnouncementID instead. It exists only for internal usage by the builders. +func (m *AnnouncementReadMutation) AnnouncementIDs() (ids []int64) { + if id := m.announcement; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAnnouncement resets all changes to the "announcement" edge. +func (m *AnnouncementReadMutation) ResetAnnouncement() { + m.announcement = nil + m.clearedannouncement = false +} + +// ClearUser clears the "user" edge to the User entity. +func (m *AnnouncementReadMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[announcementread.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *AnnouncementReadMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *AnnouncementReadMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *AnnouncementReadMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the AnnouncementReadMutation builder. +func (m *AnnouncementReadMutation) Where(ps ...predicate.AnnouncementRead) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AnnouncementReadMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AnnouncementReadMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AnnouncementRead, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) 
+} + +// Op returns the operation name. +func (m *AnnouncementReadMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AnnouncementReadMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AnnouncementRead). +func (m *AnnouncementReadMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AnnouncementReadMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.announcement != nil { + fields = append(fields, announcementread.FieldAnnouncementID) + } + if m.user != nil { + fields = append(fields, announcementread.FieldUserID) + } + if m.read_at != nil { + fields = append(fields, announcementread.FieldReadAt) + } + if m.created_at != nil { + fields = append(fields, announcementread.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AnnouncementReadMutation) Field(name string) (ent.Value, bool) { + switch name { + case announcementread.FieldAnnouncementID: + return m.AnnouncementID() + case announcementread.FieldUserID: + return m.UserID() + case announcementread.FieldReadAt: + return m.ReadAt() + case announcementread.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AnnouncementReadMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case announcementread.FieldAnnouncementID: + return m.OldAnnouncementID(ctx) + case announcementread.FieldUserID: + return m.OldUserID(ctx) + case announcementread.FieldReadAt: + return m.OldReadAt(ctx) + case announcementread.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AnnouncementReadMutation) SetField(name string, value ent.Value) error { + switch name { + case announcementread.FieldAnnouncementID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAnnouncementID(v) + return nil + case announcementread.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case announcementread.FieldReadAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetReadAt(v) + return nil + case announcementread.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *AnnouncementReadMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AnnouncementReadMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AnnouncementReadMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown AnnouncementRead numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AnnouncementReadMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AnnouncementReadMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AnnouncementReadMutation) ClearField(name string) error { + return fmt.Errorf("unknown AnnouncementRead nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AnnouncementReadMutation) ResetField(name string) error { + switch name { + case announcementread.FieldAnnouncementID: + m.ResetAnnouncementID() + return nil + case announcementread.FieldUserID: + m.ResetUserID() + return nil + case announcementread.FieldReadAt: + m.ResetReadAt() + return nil + case announcementread.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AnnouncementReadMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.announcement != nil { + edges = append(edges, announcementread.EdgeAnnouncement) + } + if m.user != nil { + edges = append(edges, announcementread.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AnnouncementReadMutation) AddedIDs(name string) []ent.Value { + switch name { + case announcementread.EdgeAnnouncement: + if id := m.announcement; id != nil { + return []ent.Value{*id} + } + case announcementread.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AnnouncementReadMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AnnouncementReadMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *AnnouncementReadMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedannouncement { + edges = append(edges, announcementread.EdgeAnnouncement) + } + if m.cleareduser { + edges = append(edges, announcementread.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AnnouncementReadMutation) EdgeCleared(name string) bool { + switch name { + case announcementread.EdgeAnnouncement: + return m.clearedannouncement + case announcementread.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AnnouncementReadMutation) ClearEdge(name string) error { + switch name { + case announcementread.EdgeAnnouncement: + m.ClearAnnouncement() + return nil + case announcementread.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown AnnouncementRead unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AnnouncementReadMutation) ResetEdge(name string) error { + switch name { + case announcementread.EdgeAnnouncement: + m.ResetAnnouncement() + return nil + case announcementread.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown AnnouncementRead edge %s", name) +} + // GroupMutation represents an operation that mutates the Group nodes in the graph. type GroupMutation struct { config @@ -14376,6 +16046,9 @@ type UserMutation struct { assigned_subscriptions map[int64]struct{} removedassigned_subscriptions map[int64]struct{} clearedassigned_subscriptions bool + announcement_reads map[int64]struct{} + removedannouncement_reads map[int64]struct{} + clearedannouncement_reads bool allowed_groups map[int64]struct{} removedallowed_groups map[int64]struct{} clearedallowed_groups bool @@ -15290,6 +16963,60 @@ func (m *UserMutation) ResetAssignedSubscriptions() { m.removedassigned_subscriptions = nil } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by ids. +func (m *UserMutation) AddAnnouncementReadIDs(ids ...int64) { + if m.announcement_reads == nil { + m.announcement_reads = make(map[int64]struct{}) + } + for i := range ids { + m.announcement_reads[ids[i]] = struct{}{} + } +} + +// ClearAnnouncementReads clears the "announcement_reads" edge to the AnnouncementRead entity. +func (m *UserMutation) ClearAnnouncementReads() { + m.clearedannouncement_reads = true +} + +// AnnouncementReadsCleared reports if the "announcement_reads" edge to the AnnouncementRead entity was cleared. +func (m *UserMutation) AnnouncementReadsCleared() bool { + return m.clearedannouncement_reads +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (m *UserMutation) RemoveAnnouncementReadIDs(ids ...int64) { + if m.removedannouncement_reads == nil { + m.removedannouncement_reads = make(map[int64]struct{}) + } + for i := range ids { + delete(m.announcement_reads, ids[i]) + m.removedannouncement_reads[ids[i]] = struct{}{} + } +} + +// RemovedAnnouncementReads returns the removed IDs of the "announcement_reads" edge to the AnnouncementRead entity. 
+func (m *UserMutation) RemovedAnnouncementReadsIDs() (ids []int64) { + for id := range m.removedannouncement_reads { + ids = append(ids, id) + } + return +} + +// AnnouncementReadsIDs returns the "announcement_reads" edge IDs in the mutation. +func (m *UserMutation) AnnouncementReadsIDs() (ids []int64) { + for id := range m.announcement_reads { + ids = append(ids, id) + } + return +} + +// ResetAnnouncementReads resets all changes to the "announcement_reads" edge. +func (m *UserMutation) ResetAnnouncementReads() { + m.announcement_reads = nil + m.clearedannouncement_reads = false + m.removedannouncement_reads = nil +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by ids. func (m *UserMutation) AddAllowedGroupIDs(ids ...int64) { if m.allowed_groups == nil { @@ -15908,7 +17635,7 @@ func (m *UserMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *UserMutation) AddedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.api_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -15921,6 +17648,9 @@ func (m *UserMutation) AddedEdges() []string { if m.assigned_subscriptions != nil { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.announcement_reads != nil { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.allowed_groups != nil { edges = append(edges, user.EdgeAllowedGroups) } @@ -15964,6 +17694,12 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAnnouncementReads: + ids := make([]ent.Value, 0, len(m.announcement_reads)) + for id := range m.announcement_reads { + ids = append(ids, id) + } + return ids case user.EdgeAllowedGroups: ids := make([]ent.Value, 0, len(m.allowed_groups)) for id := range m.allowed_groups { @@ -15994,7 +17730,7 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { // RemovedEdges returns all edge names that were removed in this mutation. func (m *UserMutation) RemovedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.removedapi_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -16007,6 +17743,9 @@ func (m *UserMutation) RemovedEdges() []string { if m.removedassigned_subscriptions != nil { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.removedannouncement_reads != nil { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.removedallowed_groups != nil { edges = append(edges, user.EdgeAllowedGroups) } @@ -16050,6 +17789,12 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAnnouncementReads: + ids := make([]ent.Value, 0, len(m.removedannouncement_reads)) + for id := range m.removedannouncement_reads { + ids = append(ids, id) + } + return ids case user.EdgeAllowedGroups: ids := make([]ent.Value, 0, len(m.removedallowed_groups)) for id := range m.removedallowed_groups { @@ -16080,7 +17825,7 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *UserMutation) ClearedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.clearedapi_keys { edges = append(edges, user.EdgeAPIKeys) } @@ -16093,6 +17838,9 @@ func (m *UserMutation) ClearedEdges() []string { if m.clearedassigned_subscriptions { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.clearedannouncement_reads { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.clearedallowed_groups { edges = append(edges, user.EdgeAllowedGroups) } @@ -16120,6 +17868,8 @@ func (m *UserMutation) EdgeCleared(name string) bool { return m.clearedsubscriptions case user.EdgeAssignedSubscriptions: return m.clearedassigned_subscriptions + case user.EdgeAnnouncementReads: + return m.clearedannouncement_reads case user.EdgeAllowedGroups: return m.clearedallowed_groups case user.EdgeUsageLogs: @@ -16156,6 +17906,9 @@ func (m *UserMutation) ResetEdge(name string) error { case user.EdgeAssignedSubscriptions: m.ResetAssignedSubscriptions() return nil + case user.EdgeAnnouncementReads: + m.ResetAnnouncementReads() + return nil case user.EdgeAllowedGroups: m.ResetAllowedGroups() return nil diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 785cb4e6..613c5913 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -15,6 +15,12 @@ type Account func(*sql.Selector) // AccountGroup is the predicate function for accountgroup builders. type AccountGroup func(*sql.Selector) +// Announcement is the predicate function for announcement builders. +type Announcement func(*sql.Selector) + +// AnnouncementRead is the predicate function for announcementread builders. +type AnnouncementRead func(*sql.Selector) + // Group is the predicate function for group builders. type Group func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 14323f8c..ae4eece8 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -7,6 +7,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -210,6 +212,56 @@ func init() { accountgroupDescCreatedAt := accountgroupFields[3].Descriptor() // accountgroup.DefaultCreatedAt holds the default value on creation for the created_at field. accountgroup.DefaultCreatedAt = accountgroupDescCreatedAt.Default.(func() time.Time) + announcementFields := schema.Announcement{}.Fields() + _ = announcementFields + // announcementDescTitle is the schema descriptor for title field. + announcementDescTitle := announcementFields[0].Descriptor() + // announcement.TitleValidator is a validator for the "title" field. It is called by the builders before save. + announcement.TitleValidator = func() func(string) error { + validators := announcementDescTitle.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(title string) error { + for _, fn := range fns { + if err := fn(title); err != nil { + return err + } + } + return nil + } + }() + // announcementDescContent is the schema descriptor for content field. + announcementDescContent := announcementFields[1].Descriptor() + // announcement.ContentValidator is a validator for the "content" field. 
It is called by the builders before save. + announcement.ContentValidator = announcementDescContent.Validators[0].(func(string) error) + // announcementDescStatus is the schema descriptor for status field. + announcementDescStatus := announcementFields[2].Descriptor() + // announcement.DefaultStatus holds the default value on creation for the status field. + announcement.DefaultStatus = announcementDescStatus.Default.(string) + // announcement.StatusValidator is a validator for the "status" field. It is called by the builders before save. + announcement.StatusValidator = announcementDescStatus.Validators[0].(func(string) error) + // announcementDescCreatedAt is the schema descriptor for created_at field. + announcementDescCreatedAt := announcementFields[8].Descriptor() + // announcement.DefaultCreatedAt holds the default value on creation for the created_at field. + announcement.DefaultCreatedAt = announcementDescCreatedAt.Default.(func() time.Time) + // announcementDescUpdatedAt is the schema descriptor for updated_at field. + announcementDescUpdatedAt := announcementFields[9].Descriptor() + // announcement.DefaultUpdatedAt holds the default value on creation for the updated_at field. + announcement.DefaultUpdatedAt = announcementDescUpdatedAt.Default.(func() time.Time) + // announcement.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + announcement.UpdateDefaultUpdatedAt = announcementDescUpdatedAt.UpdateDefault.(func() time.Time) + announcementreadFields := schema.AnnouncementRead{}.Fields() + _ = announcementreadFields + // announcementreadDescReadAt is the schema descriptor for read_at field. + announcementreadDescReadAt := announcementreadFields[2].Descriptor() + // announcementread.DefaultReadAt holds the default value on creation for the read_at field. + announcementread.DefaultReadAt = announcementreadDescReadAt.Default.(func() time.Time) + // announcementreadDescCreatedAt is the schema descriptor for created_at field. + announcementreadDescCreatedAt := announcementreadFields[3].Descriptor() + // announcementread.DefaultCreatedAt holds the default value on creation for the created_at field. + announcementread.DefaultCreatedAt = announcementreadDescCreatedAt.Default.(func() time.Time) groupMixin := schema.Group{}.Mixin() groupMixinHooks1 := groupMixin[1].Hooks() group.Hooks[0] = groupMixinHooks1[0] diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go index dd79ba96..1cfecc2d 100644 --- a/backend/ent/schema/account.go +++ b/backend/ent/schema/account.go @@ -4,7 +4,7 @@ package schema import ( "github.com/Wei-Shaw/sub2api/ent/schema/mixins" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -111,7 +111,7 @@ func (Account) Fields() []ent.Field { // status: 账户状态,如 "active", "error", "disabled" field.String("status"). MaxLen(20). - Default(service.StatusActive), + Default(domain.StatusActive), // error_message: 错误信息,记录账户异常时的详细信息 field.String("error_message"). 
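
The Announcement schema introduced below keeps its display rules in a single jsonb column typed as domain.AnnouncementTargeting, so targeting travels with the row and needs no join at read time. As a minimal sketch of the generated client this schema yields (the function, the client wiring, and the literal group ID are hypothetical, not part of this patch):

func createMaintenanceNotice(ctx context.Context, client *ent.Client) error {
	created, err := client.Announcement.Create().
		SetTitle("Scheduled maintenance").
		SetContent("The gateway will be read-only on Saturday.").
		SetStatus(domain.AnnouncementStatusActive).
		SetTargeting(domain.AnnouncementTargeting{
			AnyOf: []domain.AnnouncementConditionGroup{{
				AllOf: []domain.AnnouncementCondition{{
					Type:     domain.AnnouncementConditionTypeSubscription,
					Operator: domain.AnnouncementOperatorIn,
					GroupIDs: []int64{42}, // hypothetical subscription group ID
				}},
			}},
		}).
		SetStartsAt(time.Now()).
		Save(ctx)
	if err != nil {
		return err
	}
	_ = created
	return nil
}

An empty Targeting (no AnyOf groups) matches every user; see AnnouncementTargeting.Matches in internal/domain/announcement.go later in this patch.
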
diff --git a/backend/ent/schema/announcement.go b/backend/ent/schema/announcement.go new file mode 100644 index 00000000..3b534831 --- /dev/null +++ b/backend/ent/schema/announcement.go @@ -0,0 +1,91 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/domain" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Announcement holds the schema definition for the Announcement entity. +// +// 删除策略:硬删除(已读记录通过外键级联删除) +type Announcement struct { + ent.Schema +} + +func (Announcement) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "announcements"}, + } +} + +func (Announcement) Fields() []ent.Field { + return []ent.Field{ + field.String("title"). + MaxLen(200). + NotEmpty(). + Comment("公告标题"), + field.String("content"). + SchemaType(map[string]string{dialect.Postgres: "text"}). + NotEmpty(). + Comment("公告内容(支持 Markdown)"), + field.String("status"). + MaxLen(20). + Default(domain.AnnouncementStatusDraft). + Comment("状态: draft, active, archived"), + field.JSON("targeting", domain.AnnouncementTargeting{}). + Optional(). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}). + Comment("展示条件(JSON 规则)"), + field.Time("starts_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("开始展示时间(为空表示立即生效)"), + field.Time("ends_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("结束展示时间(为空表示永久生效)"), + field.Int64("created_by"). + Optional(). + Nillable(). + Comment("创建人用户ID(管理员)"), + field.Int64("updated_by"). + Optional(). + Nillable(). + Comment("更新人用户ID(管理员)"), + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + } +} + +func (Announcement) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("reads", AnnouncementRead.Type), + } +} + +func (Announcement) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("status"), + index.Fields("created_at"), + index.Fields("starts_at"), + index.Fields("ends_at"), + } +} + diff --git a/backend/ent/schema/announcement_read.go b/backend/ent/schema/announcement_read.go new file mode 100644 index 00000000..2f80d8b2 --- /dev/null +++ b/backend/ent/schema/announcement_read.go @@ -0,0 +1,66 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// AnnouncementRead holds the schema definition for the AnnouncementRead entity. +// +// 记录用户对公告的已读状态(首次已读时间)。 +type AnnouncementRead struct { + ent.Schema +} + +func (AnnouncementRead) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "announcement_reads"}, + } +} + +func (AnnouncementRead) Fields() []ent.Field { + return []ent.Field{ + field.Int64("announcement_id"), + field.Int64("user_id"), + field.Time("read_at"). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("用户首次已读时间"), + field.Time("created_at"). + Immutable(). + Default(time.Now). 
+ SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + } +} + +func (AnnouncementRead) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("announcement", Announcement.Type). + Ref("reads"). + Field("announcement_id"). + Unique(). + Required(), + edge.From("user", User.Type). + Ref("announcement_reads"). + Field("user_id"). + Unique(). + Required(), + } +} + +func (AnnouncementRead) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("announcement_id"), + index.Fields("user_id"), + index.Fields("read_at"), + index.Fields("announcement_id", "user_id").Unique(), + } +} + diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go index 1b206089..1c2d4bd4 100644 --- a/backend/ent/schema/api_key.go +++ b/backend/ent/schema/api_key.go @@ -2,7 +2,7 @@ package schema import ( "github.com/Wei-Shaw/sub2api/ent/schema/mixins" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect/entsql" @@ -45,7 +45,7 @@ func (APIKey) Fields() []ent.Field { Nillable(), field.String("status"). MaxLen(20). - Default(service.StatusActive), + Default(domain.StatusActive), field.JSON("ip_whitelist", []string{}). Optional(). Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"), diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 5d0a1e9a..ccd72eac 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -2,7 +2,7 @@ package schema import ( "github.com/Wei-Shaw/sub2api/ent/schema/mixins" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -49,15 +49,15 @@ func (Group) Fields() []ent.Field { Default(false), field.String("status"). MaxLen(20). - Default(service.StatusActive), + Default(domain.StatusActive), // Subscription-related fields (added by migration 003) field.String("platform"). MaxLen(50). - Default(service.PlatformAnthropic), + Default(domain.PlatformAnthropic), field.String("subscription_type"). MaxLen(20). - Default(service.SubscriptionTypeStandard), + Default(domain.SubscriptionTypeStandard), field.Float("daily_limit_usd"). Optional(). Nillable(). diff --git a/backend/ent/schema/promo_code.go b/backend/ent/schema/promo_code.go index c3bb824b..3dd08c0e 100644 --- a/backend/ent/schema/promo_code.go +++ b/backend/ent/schema/promo_code.go @@ -3,7 +3,7 @@ package schema import ( "time" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -49,7 +49,7 @@ func (PromoCode) Fields() []ent.Field { Comment("已使用次数"), field.String("status"). MaxLen(20). - Default(service.PromoCodeStatusActive). + Default(domain.PromoCodeStatusActive). Comment("状态: active, disabled"), field.Time("expires_at"). Optional(). diff --git a/backend/ent/schema/redeem_code.go b/backend/ent/schema/redeem_code.go index b4664e06..6fb86148 100644 --- a/backend/ent/schema/redeem_code.go +++ b/backend/ent/schema/redeem_code.go @@ -3,7 +3,7 @@ package schema import ( "time" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -41,13 +41,13 @@ func (RedeemCode) Fields() []ent.Field { Unique(), field.String("type"). MaxLen(20). - Default(service.RedeemTypeBalance), + Default(domain.RedeemTypeBalance), field.Float("value"). SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). Default(0), field.String("status"). 
MaxLen(20). - Default(service.StatusUnused), + Default(domain.StatusUnused), field.Int64("used_by"). Optional(). Nillable(), diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go index 335c1cc8..d443ef45 100644 --- a/backend/ent/schema/user.go +++ b/backend/ent/schema/user.go @@ -2,7 +2,7 @@ package schema import ( "github.com/Wei-Shaw/sub2api/ent/schema/mixins" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -43,7 +43,7 @@ func (User) Fields() []ent.Field { NotEmpty(), field.String("role"). MaxLen(20). - Default(service.RoleUser), + Default(domain.RoleUser), field.Float("balance"). SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). Default(0), @@ -51,7 +51,7 @@ func (User) Fields() []ent.Field { Default(5), field.String("status"). MaxLen(20). - Default(service.StatusActive), + Default(domain.StatusActive), // Optional profile fields (added later; default '' in DB migration) field.String("username"). @@ -81,6 +81,7 @@ func (User) Edges() []ent.Edge { edge.To("redeem_codes", RedeemCode.Type), edge.To("subscriptions", UserSubscription.Type), edge.To("assigned_subscriptions", UserSubscription.Type), + edge.To("announcement_reads", AnnouncementRead.Type), edge.To("allowed_groups", Group.Type). Through("user_allowed_groups", UserAllowedGroup.Type), edge.To("usage_logs", UsageLog.Type), diff --git a/backend/ent/schema/user_subscription.go b/backend/ent/schema/user_subscription.go index b21f4083..fa13612b 100644 --- a/backend/ent/schema/user_subscription.go +++ b/backend/ent/schema/user_subscription.go @@ -4,7 +4,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/ent/schema/mixins" - "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/domain" "entgo.io/ent" "entgo.io/ent/dialect" @@ -44,7 +44,7 @@ func (UserSubscription) Fields() []ent.Field { SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), field.String("status"). MaxLen(20). - Default(service.SubscriptionStatusActive), + Default(domain.SubscriptionStatusActive), field.Time("daily_window_start"). Optional(). diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 7ff16ec8..702bdf90 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -20,6 +20,10 @@ type Tx struct { Account *AccountClient // AccountGroup is the client for interacting with the AccountGroup builders. AccountGroup *AccountGroupClient + // Announcement is the client for interacting with the Announcement builders. + Announcement *AnnouncementClient + // AnnouncementRead is the client for interacting with the AnnouncementRead builders. + AnnouncementRead *AnnouncementReadClient // Group is the client for interacting with the Group builders. Group *GroupClient // PromoCode is the client for interacting with the PromoCode builders. 
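
Because Tx now carries Announcement and AnnouncementRead clients, a read receipt can be written atomically with any related updates. A minimal sketch, assuming an initialized *ent.Client and hypothetical aid/uid values (read_at falls back to its time.Now schema default):

func markReadTx(ctx context.Context, client *ent.Client, aid, uid int64) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	// Insert the read record inside the transaction.
	if err := tx.AnnouncementRead.Create().
		SetAnnouncementID(aid).
		SetUserID(uid).
		Exec(ctx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

Note that the unique (announcement_id, user_id) index makes a repeated insert fail here; the repository further down sidesteps that with OnConflictColumns(...).DoNothing().
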
@@ -180,6 +184,8 @@ func (tx *Tx) init() { tx.APIKey = NewAPIKeyClient(tx.config) tx.Account = NewAccountClient(tx.config) tx.AccountGroup = NewAccountGroupClient(tx.config) + tx.Announcement = NewAnnouncementClient(tx.config) + tx.AnnouncementRead = NewAnnouncementReadClient(tx.config) tx.Group = NewGroupClient(tx.config) tx.PromoCode = NewPromoCodeClient(tx.config) tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) diff --git a/backend/ent/user.go b/backend/ent/user.go index 82830a95..2435aa1b 100644 --- a/backend/ent/user.go +++ b/backend/ent/user.go @@ -61,6 +61,8 @@ type UserEdges struct { Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` // AssignedSubscriptions holds the value of the assigned_subscriptions edge. AssignedSubscriptions []*UserSubscription `json:"assigned_subscriptions,omitempty"` + // AnnouncementReads holds the value of the announcement_reads edge. + AnnouncementReads []*AnnouncementRead `json:"announcement_reads,omitempty"` // AllowedGroups holds the value of the allowed_groups edge. AllowedGroups []*Group `json:"allowed_groups,omitempty"` // UsageLogs holds the value of the usage_logs edge. @@ -73,7 +75,7 @@ type UserEdges struct { UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [9]bool + loadedTypes [10]bool } // APIKeysOrErr returns the APIKeys value or an error if the edge @@ -112,10 +114,19 @@ func (e UserEdges) AssignedSubscriptionsOrErr() ([]*UserSubscription, error) { return nil, &NotLoadedError{edge: "assigned_subscriptions"} } +// AnnouncementReadsOrErr returns the AnnouncementReads value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AnnouncementReadsOrErr() ([]*AnnouncementRead, error) { + if e.loadedTypes[4] { + return e.AnnouncementReads, nil + } + return nil, &NotLoadedError{edge: "announcement_reads"} +} + // AllowedGroupsOrErr returns the AllowedGroups value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { - if e.loadedTypes[4] { + if e.loadedTypes[5] { return e.AllowedGroups, nil } return nil, &NotLoadedError{edge: "allowed_groups"} @@ -124,7 +135,7 @@ func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { // UsageLogsOrErr returns the UsageLogs value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) UsageLogsOrErr() ([]*UsageLog, error) { - if e.loadedTypes[5] { + if e.loadedTypes[6] { return e.UsageLogs, nil } return nil, &NotLoadedError{edge: "usage_logs"} @@ -133,7 +144,7 @@ func (e UserEdges) UsageLogsOrErr() ([]*UsageLog, error) { // AttributeValuesOrErr returns the AttributeValues value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { - if e.loadedTypes[6] { + if e.loadedTypes[7] { return e.AttributeValues, nil } return nil, &NotLoadedError{edge: "attribute_values"} @@ -142,7 +153,7 @@ func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { // PromoCodeUsagesOrErr returns the PromoCodeUsages value or an error if the edge // was not loaded in eager-loading. 
func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { - if e.loadedTypes[7] { + if e.loadedTypes[8] { return e.PromoCodeUsages, nil } return nil, &NotLoadedError{edge: "promo_code_usages"} @@ -151,7 +162,7 @@ func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { // UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { - if e.loadedTypes[8] { + if e.loadedTypes[9] { return e.UserAllowedGroups, nil } return nil, &NotLoadedError{edge: "user_allowed_groups"} @@ -313,6 +324,11 @@ func (_m *User) QueryAssignedSubscriptions() *UserSubscriptionQuery { return NewUserClient(_m.config).QueryAssignedSubscriptions(_m) } +// QueryAnnouncementReads queries the "announcement_reads" edge of the User entity. +func (_m *User) QueryAnnouncementReads() *AnnouncementReadQuery { + return NewUserClient(_m.config).QueryAnnouncementReads(_m) +} + // QueryAllowedGroups queries the "allowed_groups" edge of the User entity. func (_m *User) QueryAllowedGroups() *GroupQuery { return NewUserClient(_m.config).QueryAllowedGroups(_m) diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go index 0685ed72..ae9418ff 100644 --- a/backend/ent/user/user.go +++ b/backend/ent/user/user.go @@ -51,6 +51,8 @@ const ( EdgeSubscriptions = "subscriptions" // EdgeAssignedSubscriptions holds the string denoting the assigned_subscriptions edge name in mutations. EdgeAssignedSubscriptions = "assigned_subscriptions" + // EdgeAnnouncementReads holds the string denoting the announcement_reads edge name in mutations. + EdgeAnnouncementReads = "announcement_reads" // EdgeAllowedGroups holds the string denoting the allowed_groups edge name in mutations. EdgeAllowedGroups = "allowed_groups" // EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations. @@ -91,6 +93,13 @@ const ( AssignedSubscriptionsInverseTable = "user_subscriptions" // AssignedSubscriptionsColumn is the table column denoting the assigned_subscriptions relation/edge. AssignedSubscriptionsColumn = "assigned_by" + // AnnouncementReadsTable is the table that holds the announcement_reads relation/edge. + AnnouncementReadsTable = "announcement_reads" + // AnnouncementReadsInverseTable is the table name for the AnnouncementRead entity. + // It exists in this package in order to avoid circular dependency with the "announcementread" package. + AnnouncementReadsInverseTable = "announcement_reads" + // AnnouncementReadsColumn is the table column denoting the announcement_reads relation/edge. + AnnouncementReadsColumn = "user_id" // AllowedGroupsTable is the table that holds the allowed_groups relation/edge. The primary key declared below. AllowedGroupsTable = "user_allowed_groups" // AllowedGroupsInverseTable is the table name for the Group entity. @@ -335,6 +344,20 @@ func ByAssignedSubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOp } } +// ByAnnouncementReadsCount orders the results by announcement_reads count. +func ByAnnouncementReadsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAnnouncementReadsStep(), opts...) + } +} + +// ByAnnouncementReads orders the results by announcement_reads terms. 
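+// The supplied terms order users by columns of their joined
+// announcement_reads rows (for example read_at), via sqlgraph.OrderByNeighborTerms.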
+func ByAnnouncementReads(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnnouncementReadsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + // ByAllowedGroupsCount orders the results by allowed_groups count. func ByAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { @@ -432,6 +455,13 @@ func newAssignedSubscriptionsStep() *sqlgraph.Step { sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), ) } +func newAnnouncementReadsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnnouncementReadsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AnnouncementReadsTable, AnnouncementReadsColumn), + ) +} func newAllowedGroupsStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go index 3dc4fec7..1de61037 100644 --- a/backend/ent/user/where.go +++ b/backend/ent/user/where.go @@ -952,6 +952,29 @@ func HasAssignedSubscriptionsWith(preds ...predicate.UserSubscription) predicate }) } +// HasAnnouncementReads applies the HasEdge predicate on the "announcement_reads" edge. +func HasAnnouncementReads() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AnnouncementReadsTable, AnnouncementReadsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAnnouncementReadsWith applies the HasEdge predicate on the "announcement_reads" edge with a given conditions (other predicates). +func HasAnnouncementReadsWith(preds ...predicate.AnnouncementRead) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAnnouncementReadsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // HasAllowedGroups applies the HasEdge predicate on the "allowed_groups" edge. func HasAllowedGroups() predicate.User { return predicate.User(func(s *sql.Selector) { diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go index 6b4ebc59..f862a580 100644 --- a/backend/ent/user_create.go +++ b/backend/ent/user_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -269,6 +270,21 @@ func (_c *UserCreate) AddAssignedSubscriptions(v ...*UserSubscription) *UserCrea return _c.AddAssignedSubscriptionIDs(ids...) } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (_c *UserCreate) AddAnnouncementReadIDs(ids ...int64) *UserCreate { + _c.mutation.AddAnnouncementReadIDs(ids...) + return _c +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_c *UserCreate) AddAnnouncementReads(v ...*AnnouncementRead) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_c *UserCreate) AddAllowedGroupIDs(ids ...int64) *UserCreate { _c.mutation.AddAllowedGroupIDs(ids...) 
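
Together, the new predicate and eager-loading hooks make read-state queries one-liners against the generated API. A minimal sketch (the function and the annID/uid arguments are hypothetical stand-ins):

func readState(ctx context.Context, client *ent.Client, annID, uid int64) error {
	// Users who have read a given announcement.
	readers, err := client.User.Query().
		Where(user.HasAnnouncementReadsWith(announcementread.AnnouncementIDEQ(annID))).
		All(ctx)
	if err != nil {
		return err
	}
	_ = readers

	// One user with their read records eager-loaded onto Edges.AnnouncementReads.
	u, err := client.User.Query().
		Where(user.IDEQ(uid)).
		WithAnnouncementReads().
		Only(ctx)
	if err != nil {
		return err
	}
	_ = u.Edges.AnnouncementReads
	return nil
}
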
@@ -618,6 +634,22 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := _c.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } if nodes := _c.mutation.AllowedGroupsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go index e66e2dc8..4b56e16f 100644 --- a/backend/ent/user_query.go +++ b/backend/ent/user_query.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -36,6 +37,7 @@ type UserQuery struct { withRedeemCodes *RedeemCodeQuery withSubscriptions *UserSubscriptionQuery withAssignedSubscriptions *UserSubscriptionQuery + withAnnouncementReads *AnnouncementReadQuery withAllowedGroups *GroupQuery withUsageLogs *UsageLogQuery withAttributeValues *UserAttributeValueQuery @@ -166,6 +168,28 @@ func (_q *UserQuery) QueryAssignedSubscriptions() *UserSubscriptionQuery { return query } +// QueryAnnouncementReads chains the current query on the "announcement_reads" edge. +func (_q *UserQuery) QueryAnnouncementReads() *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AnnouncementReadsTable, user.AnnouncementReadsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + // QueryAllowedGroups chains the current query on the "allowed_groups" edge. func (_q *UserQuery) QueryAllowedGroups() *GroupQuery { query := (&GroupClient{config: _q.config}).Query() @@ -472,6 +496,7 @@ func (_q *UserQuery) Clone() *UserQuery { withRedeemCodes: _q.withRedeemCodes.Clone(), withSubscriptions: _q.withSubscriptions.Clone(), withAssignedSubscriptions: _q.withAssignedSubscriptions.Clone(), + withAnnouncementReads: _q.withAnnouncementReads.Clone(), withAllowedGroups: _q.withAllowedGroups.Clone(), withUsageLogs: _q.withUsageLogs.Clone(), withAttributeValues: _q.withAttributeValues.Clone(), @@ -527,6 +552,17 @@ func (_q *UserQuery) WithAssignedSubscriptions(opts ...func(*UserSubscriptionQue return _q } +// WithAnnouncementReads tells the query-builder to eager-load the nodes that are connected to +// the "announcement_reads" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *UserQuery) WithAnnouncementReads(opts ...func(*AnnouncementReadQuery)) *UserQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAnnouncementReads = query + return _q +} + // WithAllowedGroups tells the query-builder to eager-load the nodes that are connected to // the "allowed_groups" edge. The optional arguments are used to configure the query builder of the edge. func (_q *UserQuery) WithAllowedGroups(opts ...func(*GroupQuery)) *UserQuery { @@ -660,11 +696,12 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e var ( nodes = []*User{} _spec = _q.querySpec() - loadedTypes = [9]bool{ + loadedTypes = [10]bool{ _q.withAPIKeys != nil, _q.withRedeemCodes != nil, _q.withSubscriptions != nil, _q.withAssignedSubscriptions != nil, + _q.withAnnouncementReads != nil, _q.withAllowedGroups != nil, _q.withUsageLogs != nil, _q.withAttributeValues != nil, @@ -723,6 +760,13 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e return nil, err } } + if query := _q.withAnnouncementReads; query != nil { + if err := _q.loadAnnouncementReads(ctx, query, nodes, + func(n *User) { n.Edges.AnnouncementReads = []*AnnouncementRead{} }, + func(n *User, e *AnnouncementRead) { n.Edges.AnnouncementReads = append(n.Edges.AnnouncementReads, e) }); err != nil { + return nil, err + } + } if query := _q.withAllowedGroups; query != nil { if err := _q.loadAllowedGroups(ctx, query, nodes, func(n *User) { n.Edges.AllowedGroups = []*Group{} }, @@ -887,6 +931,36 @@ func (_q *UserQuery) loadAssignedSubscriptions(ctx context.Context, query *UserS } return nil } +func (_q *UserQuery) loadAnnouncementReads(ctx context.Context, query *AnnouncementReadQuery, nodes []*User, init func(*User), assign func(*User, *AnnouncementRead)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(announcementread.FieldUserID) + } + query.Where(predicate.AnnouncementRead(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AnnouncementReadsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} func (_q *UserQuery) loadAllowedGroups(ctx context.Context, query *GroupQuery, nodes []*User, init func(*User), assign func(*User, *Group)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[int64]*User) diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go index b98a41c6..80222c92 100644 --- a/backend/ent/user_update.go +++ b/backend/ent/user_update.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -301,6 +302,21 @@ func (_u *UserUpdate) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpda return _u.AddAssignedSubscriptionIDs(ids...) } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. 
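+// Under the hood this writes announcement_reads.user_id on the target rows;
+// see the O2M edge spec assembled in sqlSave below.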
+func (_u *UserUpdate) AddAnnouncementReadIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAnnouncementReadIDs(ids...) + return _u +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdate) AddAnnouncementReads(v ...*AnnouncementRead) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_u *UserUpdate) AddAllowedGroupIDs(ids ...int64) *UserUpdate { _u.mutation.AddAllowedGroupIDs(ids...) @@ -450,6 +466,27 @@ func (_u *UserUpdate) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserU return _u.RemoveAssignedSubscriptionIDs(ids...) } +// ClearAnnouncementReads clears all "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdate) ClearAnnouncementReads() *UserUpdate { + _u.mutation.ClearAnnouncementReads() + return _u +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to AnnouncementRead entities by IDs. +func (_u *UserUpdate) RemoveAnnouncementReadIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAnnouncementReadIDs(ids...) + return _u +} + +// RemoveAnnouncementReads removes "announcement_reads" edges to AnnouncementRead entities. +func (_u *UserUpdate) RemoveAnnouncementReads(v ...*AnnouncementRead) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAnnouncementReadIDs(ids...) +} + // ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. func (_u *UserUpdate) ClearAllowedGroups() *UserUpdate { _u.mutation.ClearAllowedGroups() @@ -852,6 +889,51 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAnnouncementReadsIDs(); len(nodes) > 0 && !_u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _u.mutation.AllowedGroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -1330,6 +1412,21 @@ func (_u *UserUpdateOne) AddAssignedSubscriptions(v ...*UserSubscription) *UserU return _u.AddAssignedSubscriptionIDs(ids...) 
} +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (_u *UserUpdateOne) AddAnnouncementReadIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAnnouncementReadIDs(ids...) + return _u +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdateOne) AddAnnouncementReads(v ...*AnnouncementRead) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_u *UserUpdateOne) AddAllowedGroupIDs(ids ...int64) *UserUpdateOne { _u.mutation.AddAllowedGroupIDs(ids...) @@ -1479,6 +1576,27 @@ func (_u *UserUpdateOne) RemoveAssignedSubscriptions(v ...*UserSubscription) *Us return _u.RemoveAssignedSubscriptionIDs(ids...) } +// ClearAnnouncementReads clears all "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdateOne) ClearAnnouncementReads() *UserUpdateOne { + _u.mutation.ClearAnnouncementReads() + return _u +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to AnnouncementRead entities by IDs. +func (_u *UserUpdateOne) RemoveAnnouncementReadIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAnnouncementReadIDs(ids...) + return _u +} + +// RemoveAnnouncementReads removes "announcement_reads" edges to AnnouncementRead entities. +func (_u *UserUpdateOne) RemoveAnnouncementReads(v ...*AnnouncementRead) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAnnouncementReadIDs(ids...) +} + // ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. func (_u *UserUpdateOne) ClearAllowedGroups() *UserUpdateOne { _u.mutation.ClearAllowedGroups() @@ -1911,6 +2029,51 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAnnouncementReadsIDs(); len(nodes) > 0 && !_u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _u.mutation.AllowedGroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, diff --git 
a/backend/internal/domain/announcement.go b/backend/internal/domain/announcement.go new file mode 100644 index 00000000..7dc9a9cc --- /dev/null +++ b/backend/internal/domain/announcement.go @@ -0,0 +1,226 @@ +package domain + +import ( + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +const ( + AnnouncementStatusDraft = "draft" + AnnouncementStatusActive = "active" + AnnouncementStatusArchived = "archived" +) + +const ( + AnnouncementConditionTypeSubscription = "subscription" + AnnouncementConditionTypeBalance = "balance" +) + +const ( + AnnouncementOperatorIn = "in" + AnnouncementOperatorGT = "gt" + AnnouncementOperatorGTE = "gte" + AnnouncementOperatorLT = "lt" + AnnouncementOperatorLTE = "lte" + AnnouncementOperatorEQ = "eq" +) + +var ( + ErrAnnouncementNotFound = infraerrors.NotFound("ANNOUNCEMENT_NOT_FOUND", "announcement not found") + ErrAnnouncementInvalidTarget = infraerrors.BadRequest("ANNOUNCEMENT_INVALID_TARGET", "invalid announcement targeting rules") +) + +type AnnouncementTargeting struct { + // AnyOf 表示 OR:任意一个条件组满足即可展示。 + AnyOf []AnnouncementConditionGroup `json:"any_of,omitempty"` +} + +type AnnouncementConditionGroup struct { + // AllOf 表示 AND:组内所有条件都满足才算命中该组。 + AllOf []AnnouncementCondition `json:"all_of,omitempty"` +} + +type AnnouncementCondition struct { + // Type: subscription | balance + Type string `json:"type"` + + // Operator: + // - subscription: in + // - balance: gt/gte/lt/lte/eq + Operator string `json:"operator"` + + // subscription 条件:匹配的订阅套餐(group_id) + GroupIDs []int64 `json:"group_ids,omitempty"` + + // balance 条件:比较阈值 + Value float64 `json:"value,omitempty"` +} + +func (t AnnouncementTargeting) Matches(balance float64, activeSubscriptionGroupIDs map[int64]struct{}) bool { + // 空规则:展示给所有用户 + if len(t.AnyOf) == 0 { + return true + } + + for _, group := range t.AnyOf { + if len(group.AllOf) == 0 { + // 空条件组不命中(避免 OR 中出现无条件 “全命中”) + continue + } + allMatched := true + for _, cond := range group.AllOf { + if !cond.Matches(balance, activeSubscriptionGroupIDs) { + allMatched = false + break + } + } + if allMatched { + return true + } + } + + return false +} + +func (c AnnouncementCondition) Matches(balance float64, activeSubscriptionGroupIDs map[int64]struct{}) bool { + switch c.Type { + case AnnouncementConditionTypeSubscription: + if c.Operator != AnnouncementOperatorIn { + return false + } + if len(c.GroupIDs) == 0 { + return false + } + if len(activeSubscriptionGroupIDs) == 0 { + return false + } + for _, gid := range c.GroupIDs { + if _, ok := activeSubscriptionGroupIDs[gid]; ok { + return true + } + } + return false + + case AnnouncementConditionTypeBalance: + switch c.Operator { + case AnnouncementOperatorGT: + return balance > c.Value + case AnnouncementOperatorGTE: + return balance >= c.Value + case AnnouncementOperatorLT: + return balance < c.Value + case AnnouncementOperatorLTE: + return balance <= c.Value + case AnnouncementOperatorEQ: + return balance == c.Value + default: + return false + } + + default: + return false + } +} + +func (t AnnouncementTargeting) NormalizeAndValidate() (AnnouncementTargeting, error) { + normalized := AnnouncementTargeting{AnyOf: make([]AnnouncementConditionGroup, 0, len(t.AnyOf))} + + // 允许空 targeting(展示给所有用户) + if len(t.AnyOf) == 0 { + return normalized, nil + } + + if len(t.AnyOf) > 50 { + return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget + } + + for _, g := range t.AnyOf { + if len(g.AllOf) == 0 { + return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget + 
} + if len(g.AllOf) > 50 { + return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget + } + + group := AnnouncementConditionGroup{AllOf: make([]AnnouncementCondition, 0, len(g.AllOf))} + for _, c := range g.AllOf { + cond := AnnouncementCondition{ + Type: strings.TrimSpace(c.Type), + Operator: strings.TrimSpace(c.Operator), + Value: c.Value, + } + for _, gid := range c.GroupIDs { + if gid <= 0 { + return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget + } + cond.GroupIDs = append(cond.GroupIDs, gid) + } + + if err := cond.validate(); err != nil { + return AnnouncementTargeting{}, err + } + group.AllOf = append(group.AllOf, cond) + } + + normalized.AnyOf = append(normalized.AnyOf, group) + } + + return normalized, nil +} + +func (c AnnouncementCondition) validate() error { + switch c.Type { + case AnnouncementConditionTypeSubscription: + if c.Operator != AnnouncementOperatorIn { + return ErrAnnouncementInvalidTarget + } + if len(c.GroupIDs) == 0 { + return ErrAnnouncementInvalidTarget + } + return nil + + case AnnouncementConditionTypeBalance: + switch c.Operator { + case AnnouncementOperatorGT, AnnouncementOperatorGTE, AnnouncementOperatorLT, AnnouncementOperatorLTE, AnnouncementOperatorEQ: + return nil + default: + return ErrAnnouncementInvalidTarget + } + + default: + return ErrAnnouncementInvalidTarget + } +} + +type Announcement struct { + ID int64 + Title string + Content string + Status string + Targeting AnnouncementTargeting + StartsAt *time.Time + EndsAt *time.Time + CreatedBy *int64 + UpdatedBy *int64 + CreatedAt time.Time + UpdatedAt time.Time +} + +func (a *Announcement) IsActiveAt(now time.Time) bool { + if a == nil { + return false + } + if a.Status != AnnouncementStatusActive { + return false + } + if a.StartsAt != nil && now.Before(*a.StartsAt) { + return false + } + if a.EndsAt != nil && !now.Before(*a.EndsAt) { + // ends_at 语义:到点即下线 + return false + } + return true +} diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go new file mode 100644 index 00000000..4ecea9d8 --- /dev/null +++ b/backend/internal/domain/constants.go @@ -0,0 +1,64 @@ +package domain + +// Status constants +const ( + StatusActive = "active" + StatusDisabled = "disabled" + StatusError = "error" + StatusUnused = "unused" + StatusUsed = "used" + StatusExpired = "expired" +) + +// Role constants +const ( + RoleAdmin = "admin" + RoleUser = "user" +) + +// Platform constants +const ( + PlatformAnthropic = "anthropic" + PlatformOpenAI = "openai" + PlatformGemini = "gemini" + PlatformAntigravity = "antigravity" +) + +// Account type constants +const ( + AccountTypeOAuth = "oauth" // OAuth类型账号(full scope: profile + inference) + AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope) + AccountTypeAPIKey = "apikey" // API Key类型账号 +) + +// Redeem type constants +const ( + RedeemTypeBalance = "balance" + RedeemTypeConcurrency = "concurrency" + RedeemTypeSubscription = "subscription" +) + +// PromoCode status constants +const ( + PromoCodeStatusActive = "active" + PromoCodeStatusDisabled = "disabled" +) + +// Admin adjustment type constants +const ( + AdjustmentTypeAdminBalance = "admin_balance" // 管理员调整余额 + AdjustmentTypeAdminConcurrency = "admin_concurrency" // 管理员调整并发数 +) + +// Group subscription type constants +const ( + SubscriptionTypeStandard = "standard" // 标准计费模式(按余额扣费) + SubscriptionTypeSubscription = "subscription" // 订阅模式(按限额控制) +) + +// Subscription status constants +const ( + SubscriptionStatusActive = "active" + 
SubscriptionStatusExpired = "expired" + SubscriptionStatusSuspended = "suspended" +) diff --git a/backend/internal/handler/admin/announcement_handler.go b/backend/internal/handler/admin/announcement_handler.go new file mode 100644 index 00000000..a4e9f2f0 --- /dev/null +++ b/backend/internal/handler/admin/announcement_handler.go @@ -0,0 +1,247 @@ +package admin + +import ( + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// AnnouncementHandler handles admin announcement management +type AnnouncementHandler struct { + announcementService *service.AnnouncementService +} + +// NewAnnouncementHandler creates a new admin announcement handler +func NewAnnouncementHandler(announcementService *service.AnnouncementService) *AnnouncementHandler { + return &AnnouncementHandler{ + announcementService: announcementService, + } +} + +type CreateAnnouncementRequest struct { + Title string `json:"title" binding:"required"` + Content string `json:"content" binding:"required"` + Status string `json:"status" binding:"omitempty,oneof=draft active archived"` + Targeting service.AnnouncementTargeting `json:"targeting"` + StartsAt *int64 `json:"starts_at"` // Unix seconds, 0/empty = immediate + EndsAt *int64 `json:"ends_at"` // Unix seconds, 0/empty = never +} + +type UpdateAnnouncementRequest struct { + Title *string `json:"title"` + Content *string `json:"content"` + Status *string `json:"status" binding:"omitempty,oneof=draft active archived"` + Targeting *service.AnnouncementTargeting `json:"targeting"` + StartsAt *int64 `json:"starts_at"` // Unix seconds, 0 = clear + EndsAt *int64 `json:"ends_at"` // Unix seconds, 0 = clear +} + +// List handles listing announcements with filters +// GET /api/v1/admin/announcements +func (h *AnnouncementHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + status := strings.TrimSpace(c.Query("status")) + search := strings.TrimSpace(c.Query("search")) + if len(search) > 200 { + search = search[:200] + } + + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + + items, paginationResult, err := h.announcementService.List( + c.Request.Context(), + params, + service.AnnouncementListFilters{Status: status, Search: search}, + ) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.Announcement, 0, len(items)) + for i := range items { + out = append(out, *dto.AnnouncementFromService(&items[i])) + } + response.Paginated(c, out, paginationResult.Total, page, pageSize) +} + +// GetByID handles getting an announcement by ID +// GET /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) GetByID(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + item, err := h.announcementService.GetByID(c.Request.Context(), announcementID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(item)) +} + +// Create handles creating a new announcement +// POST /api/v1/admin/announcements +func (h *AnnouncementHandler) Create(c *gin.Context) { + var req CreateAnnouncementRequest + if err := c.ShouldBindJSON(&req); err 
!= nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + input := &service.CreateAnnouncementInput{ + Title: req.Title, + Content: req.Content, + Status: req.Status, + Targeting: req.Targeting, + ActorID: &subject.UserID, + } + + if req.StartsAt != nil && *req.StartsAt > 0 { + t := time.Unix(*req.StartsAt, 0) + input.StartsAt = &t + } + if req.EndsAt != nil && *req.EndsAt > 0 { + t := time.Unix(*req.EndsAt, 0) + input.EndsAt = &t + } + + created, err := h.announcementService.Create(c.Request.Context(), input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(created)) +} + +// Update handles updating an announcement +// PUT /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) Update(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + var req UpdateAnnouncementRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + input := &service.UpdateAnnouncementInput{ + Title: req.Title, + Content: req.Content, + Status: req.Status, + Targeting: req.Targeting, + ActorID: &subject.UserID, + } + + if req.StartsAt != nil { + if *req.StartsAt == 0 { + var cleared *time.Time = nil + input.StartsAt = &cleared + } else { + t := time.Unix(*req.StartsAt, 0) + ptr := &t + input.StartsAt = &ptr + } + } + + if req.EndsAt != nil { + if *req.EndsAt == 0 { + var cleared *time.Time = nil + input.EndsAt = &cleared + } else { + t := time.Unix(*req.EndsAt, 0) + ptr := &t + input.EndsAt = &ptr + } + } + + updated, err := h.announcementService.Update(c.Request.Context(), announcementID, input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(updated)) +} + +// Delete handles deleting an announcement +// DELETE /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) Delete(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + if err := h.announcementService.Delete(c.Request.Context(), announcementID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Announcement deleted successfully"}) +} + +// ListReadStatus handles listing users read status for an announcement +// GET /api/v1/admin/announcements/:id/read-status +func (h *AnnouncementHandler) ListReadStatus(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + page, pageSize := response.ParsePagination(c) + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + search := strings.TrimSpace(c.Query("search")) + if len(search) > 200 { + search = search[:200] + } + + items, paginationResult, err := h.announcementService.ListUserReadStatus( + c.Request.Context(), + announcementID, + params, + search, + ) + if err != nil { + response.ErrorFrom(c, err) + return + } + + 
response.Paginated(c, items, paginationResult.Total, page, pageSize) +} + diff --git a/backend/internal/handler/announcement_handler.go b/backend/internal/handler/announcement_handler.go new file mode 100644 index 00000000..1e1424eb --- /dev/null +++ b/backend/internal/handler/announcement_handler.go @@ -0,0 +1,82 @@ +package handler + +import ( + "strconv" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// AnnouncementHandler handles user announcement operations +type AnnouncementHandler struct { + announcementService *service.AnnouncementService +} + +// NewAnnouncementHandler creates a new user announcement handler +func NewAnnouncementHandler(announcementService *service.AnnouncementService) *AnnouncementHandler { + return &AnnouncementHandler{ + announcementService: announcementService, + } +} + +// List handles listing announcements visible to current user +// GET /api/v1/announcements +func (h *AnnouncementHandler) List(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + unreadOnly := parseBoolQuery(c.Query("unread_only")) + + items, err := h.announcementService.ListForUser(c.Request.Context(), subject.UserID, unreadOnly) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserAnnouncement, 0, len(items)) + for i := range items { + out = append(out, *dto.UserAnnouncementFromService(&items[i])) + } + response.Success(c, out) +} + +// MarkRead marks an announcement as read for current user +// POST /api/v1/announcements/:id/read +func (h *AnnouncementHandler) MarkRead(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + if err := h.announcementService.MarkRead(c.Request.Context(), subject.UserID, announcementID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "ok"}) +} + +func parseBoolQuery(v string) bool { + switch strings.TrimSpace(strings.ToLower(v)) { + case "1", "true", "yes", "y", "on": + return true + default: + return false + } +} + diff --git a/backend/internal/handler/dto/announcement.go b/backend/internal/handler/dto/announcement.go new file mode 100644 index 00000000..ec2a8ca7 --- /dev/null +++ b/backend/internal/handler/dto/announcement.go @@ -0,0 +1,75 @@ +package dto + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type Announcement struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + Status string `json:"status"` + + Targeting service.AnnouncementTargeting `json:"targeting"` + + StartsAt *time.Time `json:"starts_at,omitempty"` + EndsAt *time.Time `json:"ends_at,omitempty"` + + CreatedBy *int64 `json:"created_by,omitempty"` + UpdatedBy *int64 `json:"updated_by,omitempty"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type UserAnnouncement struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + + StartsAt *time.Time 
`json:"starts_at,omitempty"`
+	EndsAt    *time.Time `json:"ends_at,omitempty"`
+
+	ReadAt *time.Time `json:"read_at,omitempty"`
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+func AnnouncementFromService(a *service.Announcement) *Announcement {
+	if a == nil {
+		return nil
+	}
+	return &Announcement{
+		ID:        a.ID,
+		Title:     a.Title,
+		Content:   a.Content,
+		Status:    a.Status,
+		Targeting: a.Targeting,
+		StartsAt:  a.StartsAt,
+		EndsAt:    a.EndsAt,
+		CreatedBy: a.CreatedBy,
+		UpdatedBy: a.UpdatedBy,
+		CreatedAt: a.CreatedAt,
+		UpdatedAt: a.UpdatedAt,
+	}
+}
+
+func UserAnnouncementFromService(a *service.UserAnnouncement) *UserAnnouncement {
+	if a == nil {
+		return nil
+	}
+	return &UserAnnouncement{
+		ID:        a.Announcement.ID,
+		Title:     a.Announcement.Title,
+		Content:   a.Announcement.Content,
+		StartsAt:  a.Announcement.StartsAt,
+		EndsAt:    a.Announcement.EndsAt,
+		ReadAt:    a.ReadAt,
+		CreatedAt: a.Announcement.CreatedAt,
+		UpdatedAt: a.Announcement.UpdatedAt,
+	}
+}
+
diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go
index 907c314d..b8f7d417 100644
--- a/backend/internal/handler/handler.go
+++ b/backend/internal/handler/handler.go
@@ -10,6 +10,7 @@ type AdminHandlers struct {
 	User         *admin.UserHandler
 	Group        *admin.GroupHandler
 	Account      *admin.AccountHandler
+	Announcement *admin.AnnouncementHandler
 	OAuth        *admin.OAuthHandler
 	OpenAIOAuth  *admin.OpenAIOAuthHandler
 	GeminiOAuth  *admin.GeminiOAuthHandler
@@ -33,6 +34,7 @@ type Handlers struct {
 	Usage         *UsageHandler
 	Redeem        *RedeemHandler
 	Subscription  *SubscriptionHandler
+	Announcement  *AnnouncementHandler
 	Admin         *AdminHandlers
 	Gateway       *GatewayHandler
 	OpenAIGateway *OpenAIGatewayHandler
diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go
index 92e8edeb..48a3794b 100644
--- a/backend/internal/handler/wire.go
+++ b/backend/internal/handler/wire.go
@@ -13,6 +13,7 @@ func ProvideAdminHandlers(
 	userHandler *admin.UserHandler,
 	groupHandler *admin.GroupHandler,
 	accountHandler *admin.AccountHandler,
+	announcementHandler *admin.AnnouncementHandler,
 	oauthHandler *admin.OAuthHandler,
 	openaiOAuthHandler *admin.OpenAIOAuthHandler,
 	geminiOAuthHandler *admin.GeminiOAuthHandler,
@@ -32,6 +33,7 @@ func ProvideAdminHandlers(
 		User:         userHandler,
 		Group:        groupHandler,
 		Account:      accountHandler,
+		Announcement: announcementHandler,
 		OAuth:        oauthHandler,
 		OpenAIOAuth:  openaiOAuthHandler,
 		GeminiOAuth:  geminiOAuthHandler,
@@ -66,6 +68,7 @@ func ProvideHandlers(
 	usageHandler *UsageHandler,
 	redeemHandler *RedeemHandler,
 	subscriptionHandler *SubscriptionHandler,
+	announcementHandler *AnnouncementHandler,
 	adminHandlers *AdminHandlers,
 	gatewayHandler *GatewayHandler,
 	openaiGatewayHandler *OpenAIGatewayHandler,
@@ -79,6 +82,7 @@ func ProvideHandlers(
 		Usage:         usageHandler,
 		Redeem:        redeemHandler,
 		Subscription:  subscriptionHandler,
+		Announcement:  announcementHandler,
 		Admin:         adminHandlers,
 		Gateway:       gatewayHandler,
 		OpenAIGateway: openaiGatewayHandler,
@@ -96,6 +100,7 @@ var ProviderSet = wire.NewSet(
 	NewUsageHandler,
 	NewRedeemHandler,
 	NewSubscriptionHandler,
+	NewAnnouncementHandler,
 	NewGatewayHandler,
 	NewOpenAIGatewayHandler,
 	NewTotpHandler,
@@ -106,6 +111,7 @@ var ProviderSet = wire.NewSet(
 	admin.NewUserHandler,
 	admin.NewGroupHandler,
 	admin.NewAccountHandler,
+	admin.NewAnnouncementHandler,
 	admin.NewOAuthHandler,
 	admin.NewOpenAIOAuthHandler,
 	admin.NewGeminiOAuthHandler,
diff --git a/backend/internal/repository/announcement_read_repo.go b/backend/internal/repository/announcement_read_repo.go
new file mode 100644
index 00000000..1c6b480a
--- /dev/null
+++ b/backend/internal/repository/announcement_read_repo.go
@@ -0,0 +1,84 @@
+package repository
+
+import (
+	"context"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/ent/announcementread"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+type announcementReadRepository struct {
+	client *dbent.Client
+}
+
+func NewAnnouncementReadRepository(client *dbent.Client) service.AnnouncementReadRepository {
+	return &announcementReadRepository{client: client}
+}
+
+func (r *announcementReadRepository) MarkRead(ctx context.Context, announcementID, userID int64, readAt time.Time) error {
+	client := clientFromContext(ctx, r.client)
+	return client.AnnouncementRead.Create().
+		SetAnnouncementID(announcementID).
+		SetUserID(userID).
+		SetReadAt(readAt).
+		OnConflictColumns(announcementread.FieldAnnouncementID, announcementread.FieldUserID).
+		DoNothing().
+		Exec(ctx)
+}
+
+func (r *announcementReadRepository) GetReadMapByUser(ctx context.Context, userID int64, announcementIDs []int64) (map[int64]time.Time, error) {
+	if len(announcementIDs) == 0 {
+		return map[int64]time.Time{}, nil
+	}
+
+	rows, err := r.client.AnnouncementRead.Query().
+		Where(
+			announcementread.UserIDEQ(userID),
+			announcementread.AnnouncementIDIn(announcementIDs...),
+		).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[int64]time.Time, len(rows))
+	for i := range rows {
+		out[rows[i].AnnouncementID] = rows[i].ReadAt
+	}
+	return out, nil
+}
+
+func (r *announcementReadRepository) GetReadMapByUsers(ctx context.Context, announcementID int64, userIDs []int64) (map[int64]time.Time, error) {
+	if len(userIDs) == 0 {
+		return map[int64]time.Time{}, nil
+	}
+
+	rows, err := r.client.AnnouncementRead.Query().
+		Where(
+			announcementread.AnnouncementIDEQ(announcementID),
+			announcementread.UserIDIn(userIDs...),
+		).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[int64]time.Time, len(rows))
+	for i := range rows {
+		out[rows[i].UserID] = rows[i].ReadAt
+	}
+	return out, nil
+}
+
+func (r *announcementReadRepository) CountByAnnouncementID(ctx context.Context, announcementID int64) (int64, error) {
+	count, err := r.client.AnnouncementRead.Query().
+		Where(announcementread.AnnouncementIDEQ(announcementID)).
+		Count(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return int64(count), nil
+}
+
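Editorial note: MarkRead above is deliberately idempotent — the ON CONFLICT ... DO NOTHING upsert keeps the first read_at for an (announcement, user) pair. A minimal usage sketch under that assumption (function name and IDs are illustrative, not part of the patch):

package example

import (
	"context"
	"time"

	"github.com/Wei-Shaw/sub2api/internal/service"
)

// Calling MarkRead twice is safe: the second call hits the DO NOTHING
// branch, so the stored read_at remains the earlier timestamp.
func markReadTwice(ctx context.Context, repo service.AnnouncementReadRepository) error {
	if err := repo.MarkRead(ctx, 1, 42, time.Now()); err != nil {
		return err
	}
	return repo.MarkRead(ctx, 1, 42, time.Now().Add(time.Hour)) // no-op
}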
diff --git a/backend/internal/repository/announcement_repo.go b/backend/internal/repository/announcement_repo.go
new file mode 100644
index 00000000..edeb82e6
--- /dev/null
+++ b/backend/internal/repository/announcement_repo.go
@@ -0,0 +1,195 @@
+package repository
+
+import (
+	"context"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/ent/announcement"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+type announcementRepository struct {
+	client *dbent.Client
+}
+
+func NewAnnouncementRepository(client *dbent.Client) service.AnnouncementRepository {
+	return &announcementRepository{client: client}
+}
+
+func (r *announcementRepository) Create(ctx context.Context, a *service.Announcement) error {
+	client := clientFromContext(ctx, r.client)
+	builder := client.Announcement.Create().
+		SetTitle(a.Title).
+		SetContent(a.Content).
+		SetStatus(a.Status).
+		SetTargeting(a.Targeting)
+
+	if a.StartsAt != nil {
+		builder.SetStartsAt(*a.StartsAt)
+	}
+	if a.EndsAt != nil {
+		builder.SetEndsAt(*a.EndsAt)
+	}
+	if a.CreatedBy != nil {
+		builder.SetCreatedBy(*a.CreatedBy)
+	}
+	if a.UpdatedBy != nil {
+		builder.SetUpdatedBy(*a.UpdatedBy)
+	}
+
+	created, err := builder.Save(ctx)
+	if err != nil {
+		return err
+	}
+
+	applyAnnouncementEntityToService(a, created)
+	return nil
+}
+
+func (r *announcementRepository) GetByID(ctx context.Context, id int64) (*service.Announcement, error) {
+	m, err := r.client.Announcement.Query().
+		Where(announcement.IDEQ(id)).
+		Only(ctx)
+	if err != nil {
+		return nil, translatePersistenceError(err, service.ErrAnnouncementNotFound, nil)
+	}
+	return announcementEntityToService(m), nil
+}
+
+func (r *announcementRepository) Update(ctx context.Context, a *service.Announcement) error {
+	client := clientFromContext(ctx, r.client)
+	builder := client.Announcement.UpdateOneID(a.ID).
+		SetTitle(a.Title).
+		SetContent(a.Content).
+		SetStatus(a.Status).
+		SetTargeting(a.Targeting)
+
+	if a.StartsAt != nil {
+		builder.SetStartsAt(*a.StartsAt)
+	} else {
+		builder.ClearStartsAt()
+	}
+	if a.EndsAt != nil {
+		builder.SetEndsAt(*a.EndsAt)
+	} else {
+		builder.ClearEndsAt()
+	}
+	if a.CreatedBy != nil {
+		builder.SetCreatedBy(*a.CreatedBy)
+	} else {
+		builder.ClearCreatedBy()
+	}
+	if a.UpdatedBy != nil {
+		builder.SetUpdatedBy(*a.UpdatedBy)
+	} else {
+		builder.ClearUpdatedBy()
+	}
+
+	updated, err := builder.Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrAnnouncementNotFound, nil)
+	}
+
+	a.UpdatedAt = updated.UpdatedAt
+	return nil
+}
+
+func (r *announcementRepository) Delete(ctx context.Context, id int64) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.Announcement.Delete().Where(announcement.IDEQ(id)).Exec(ctx)
+	return err
+}
+
+func (r *announcementRepository) List(
+	ctx context.Context,
+	params pagination.PaginationParams,
+	filters service.AnnouncementListFilters,
+) ([]service.Announcement, *pagination.PaginationResult, error) {
+	q := r.client.Announcement.Query()
+
+	if filters.Status != "" {
+		q = q.Where(announcement.StatusEQ(filters.Status))
+	}
+	if filters.Search != "" {
+		q = q.Where(
+			announcement.Or(
+				announcement.TitleContainsFold(filters.Search),
+				announcement.ContentContainsFold(filters.Search),
+			),
+		)
+	}
+
+	total, err := q.Count(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	items, err := q.
+		Offset(params.Offset()).
+		Limit(params.Limit()).
+		Order(dbent.Desc(announcement.FieldID)).
+		All(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	out := announcementEntitiesToService(items)
+	return out, paginationResultFromTotal(int64(total), params), nil
+}
+
+func (r *announcementRepository) ListActive(ctx context.Context, now time.Time) ([]service.Announcement, error) {
+	q := r.client.Announcement.Query().
+		Where(
+			announcement.StatusEQ(service.AnnouncementStatusActive),
+			announcement.Or(announcement.StartsAtIsNil(), announcement.StartsAtLTE(now)),
+			announcement.Or(announcement.EndsAtIsNil(), announcement.EndsAtGT(now)),
+		).
+		Order(dbent.Desc(announcement.FieldID))
+
+	items, err := q.All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return announcementEntitiesToService(items), nil
+}
+
+func applyAnnouncementEntityToService(dst *service.Announcement, src *dbent.Announcement) {
+	if dst == nil || src == nil {
+		return
+	}
+	dst.ID = src.ID
+	dst.CreatedAt = src.CreatedAt
+	dst.UpdatedAt = src.UpdatedAt
+}
+
+func announcementEntityToService(m *dbent.Announcement) *service.Announcement {
+	if m == nil {
+		return nil
+	}
+	return &service.Announcement{
+		ID:        m.ID,
+		Title:     m.Title,
+		Content:   m.Content,
+		Status:    m.Status,
+		Targeting: m.Targeting,
+		StartsAt:  m.StartsAt,
+		EndsAt:    m.EndsAt,
+		CreatedBy: m.CreatedBy,
+		UpdatedBy: m.UpdatedBy,
+		CreatedAt: m.CreatedAt,
+		UpdatedAt: m.UpdatedAt,
+	}
+}
+
+func announcementEntitiesToService(models []*dbent.Announcement) []service.Announcement {
+	out := make([]service.Announcement, 0, len(models))
+	for i := range models {
+		if s := announcementEntityToService(models[i]); s != nil {
+			out = append(out, *s)
+		}
+	}
+	return out
+}
+
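Editorial note: ListActive pushes the schedule window into SQL, and the service layer re-checks the same window via domain.Announcement.IsActiveAt, which is not part of this series. The sketch below is what that method plausibly looks like, mirroring the query predicate above — an assumption, not the shipped code:

// Assumed shape of IsActiveAt: active status, starts_at unset or <= now,
// ends_at unset or > now — the same window ListActive encodes in SQL.
func (a *Announcement) IsActiveAt(now time.Time) bool {
	if a.Status != AnnouncementStatusActive {
		return false
	}
	if a.StartsAt != nil && a.StartsAt.After(now) {
		return false
	}
	if a.EndsAt != nil && !a.EndsAt.After(now) {
		return false
	}
	return true
}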
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + AnnouncementStatusDraft = domain.AnnouncementStatusDraft + AnnouncementStatusActive = domain.AnnouncementStatusActive + AnnouncementStatusArchived = domain.AnnouncementStatusArchived +) + +const ( + AnnouncementConditionTypeSubscription = domain.AnnouncementConditionTypeSubscription + AnnouncementConditionTypeBalance = domain.AnnouncementConditionTypeBalance +) + +const ( + AnnouncementOperatorIn = domain.AnnouncementOperatorIn + AnnouncementOperatorGT = domain.AnnouncementOperatorGT + AnnouncementOperatorGTE = domain.AnnouncementOperatorGTE + AnnouncementOperatorLT = domain.AnnouncementOperatorLT + AnnouncementOperatorLTE = domain.AnnouncementOperatorLTE + AnnouncementOperatorEQ = domain.AnnouncementOperatorEQ +) + +var ( + ErrAnnouncementNotFound = domain.ErrAnnouncementNotFound + ErrAnnouncementInvalidTarget = domain.ErrAnnouncementInvalidTarget +) + +type AnnouncementTargeting = domain.AnnouncementTargeting + +type AnnouncementConditionGroup = domain.AnnouncementConditionGroup + +type AnnouncementCondition = domain.AnnouncementCondition + +type Announcement = domain.Announcement + +type AnnouncementListFilters struct { + Status string + Search string +} + +type AnnouncementRepository interface { + Create(ctx context.Context, a *Announcement) error + GetByID(ctx context.Context, id int64) (*Announcement, error) + Update(ctx context.Context, a *Announcement) error + Delete(ctx context.Context, id int64) error + + List(ctx context.Context, params pagination.PaginationParams, filters AnnouncementListFilters) ([]Announcement, *pagination.PaginationResult, error) + ListActive(ctx context.Context, now time.Time) ([]Announcement, error) +} + +type AnnouncementReadRepository interface { + MarkRead(ctx context.Context, announcementID, userID int64, readAt time.Time) error + GetReadMapByUser(ctx context.Context, userID int64, announcementIDs []int64) (map[int64]time.Time, error) + GetReadMapByUsers(ctx context.Context, announcementID int64, userIDs []int64) (map[int64]time.Time, error) + CountByAnnouncementID(ctx context.Context, announcementID int64) (int64, error) +} diff --git a/backend/internal/service/announcement_service.go b/backend/internal/service/announcement_service.go new file mode 100644 index 00000000..c2588e6c --- /dev/null +++ b/backend/internal/service/announcement_service.go @@ -0,0 +1,378 @@ +package service + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/domain" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +type AnnouncementService struct { + announcementRepo AnnouncementRepository + readRepo AnnouncementReadRepository + userRepo UserRepository + userSubRepo UserSubscriptionRepository +} + +func NewAnnouncementService( + announcementRepo AnnouncementRepository, + readRepo AnnouncementReadRepository, + userRepo UserRepository, + userSubRepo UserSubscriptionRepository, +) *AnnouncementService { + return &AnnouncementService{ + announcementRepo: announcementRepo, + readRepo: readRepo, + userRepo: userRepo, + userSubRepo: userSubRepo, + } +} + +type CreateAnnouncementInput struct { + Title string + Content string + Status string + Targeting AnnouncementTargeting + StartsAt *time.Time + EndsAt *time.Time + ActorID *int64 // 管理员用户ID +} + +type UpdateAnnouncementInput struct { + Title *string + Content *string + Status *string + Targeting *AnnouncementTargeting + StartsAt **time.Time + EndsAt **time.Time + ActorID *int64 // 管理员用户ID +} + +type 
UserAnnouncement struct { + Announcement Announcement + ReadAt *time.Time +} + +type AnnouncementUserReadStatus struct { + UserID int64 `json:"user_id"` + Email string `json:"email"` + Username string `json:"username"` + Balance float64 `json:"balance"` + Eligible bool `json:"eligible"` + ReadAt *time.Time `json:"read_at,omitempty"` +} + +func (s *AnnouncementService) Create(ctx context.Context, input *CreateAnnouncementInput) (*Announcement, error) { + if input == nil { + return nil, fmt.Errorf("create announcement: nil input") + } + + title := strings.TrimSpace(input.Title) + content := strings.TrimSpace(input.Content) + if title == "" || len(title) > 200 { + return nil, fmt.Errorf("create announcement: invalid title") + } + if content == "" { + return nil, fmt.Errorf("create announcement: content is required") + } + + status := strings.TrimSpace(input.Status) + if status == "" { + status = AnnouncementStatusDraft + } + if !isValidAnnouncementStatus(status) { + return nil, fmt.Errorf("create announcement: invalid status") + } + + targeting, err := domain.AnnouncementTargeting(input.Targeting).NormalizeAndValidate() + if err != nil { + return nil, err + } + + if input.StartsAt != nil && input.EndsAt != nil { + if !input.StartsAt.Before(*input.EndsAt) { + return nil, fmt.Errorf("create announcement: starts_at must be before ends_at") + } + } + + a := &Announcement{ + Title: title, + Content: content, + Status: status, + Targeting: targeting, + StartsAt: input.StartsAt, + EndsAt: input.EndsAt, + } + if input.ActorID != nil && *input.ActorID > 0 { + a.CreatedBy = input.ActorID + a.UpdatedBy = input.ActorID + } + + if err := s.announcementRepo.Create(ctx, a); err != nil { + return nil, fmt.Errorf("create announcement: %w", err) + } + return a, nil +} + +func (s *AnnouncementService) Update(ctx context.Context, id int64, input *UpdateAnnouncementInput) (*Announcement, error) { + if input == nil { + return nil, fmt.Errorf("update announcement: nil input") + } + + a, err := s.announcementRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Title != nil { + title := strings.TrimSpace(*input.Title) + if title == "" || len(title) > 200 { + return nil, fmt.Errorf("update announcement: invalid title") + } + a.Title = title + } + if input.Content != nil { + content := strings.TrimSpace(*input.Content) + if content == "" { + return nil, fmt.Errorf("update announcement: content is required") + } + a.Content = content + } + if input.Status != nil { + status := strings.TrimSpace(*input.Status) + if !isValidAnnouncementStatus(status) { + return nil, fmt.Errorf("update announcement: invalid status") + } + a.Status = status + } + + if input.Targeting != nil { + targeting, err := domain.AnnouncementTargeting(*input.Targeting).NormalizeAndValidate() + if err != nil { + return nil, err + } + a.Targeting = targeting + } + + if input.StartsAt != nil { + a.StartsAt = *input.StartsAt + } + if input.EndsAt != nil { + a.EndsAt = *input.EndsAt + } + + if a.StartsAt != nil && a.EndsAt != nil { + if !a.StartsAt.Before(*a.EndsAt) { + return nil, fmt.Errorf("update announcement: starts_at must be before ends_at") + } + } + + if input.ActorID != nil && *input.ActorID > 0 { + a.UpdatedBy = input.ActorID + } + + if err := s.announcementRepo.Update(ctx, a); err != nil { + return nil, fmt.Errorf("update announcement: %w", err) + } + return a, nil +} + +func (s *AnnouncementService) Delete(ctx context.Context, id int64) error { + if err := s.announcementRepo.Delete(ctx, id); err != nil { + return 
fmt.Errorf("delete announcement: %w", err) + } + return nil +} + +func (s *AnnouncementService) GetByID(ctx context.Context, id int64) (*Announcement, error) { + return s.announcementRepo.GetByID(ctx, id) +} + +func (s *AnnouncementService) List(ctx context.Context, params pagination.PaginationParams, filters AnnouncementListFilters) ([]Announcement, *pagination.PaginationResult, error) { + return s.announcementRepo.List(ctx, params, filters) +} + +func (s *AnnouncementService) ListForUser(ctx context.Context, userID int64, unreadOnly bool) ([]UserAnnouncement, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + activeSubs, err := s.userSubRepo.ListActiveByUserID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("list active subscriptions: %w", err) + } + activeGroupIDs := make(map[int64]struct{}, len(activeSubs)) + for i := range activeSubs { + activeGroupIDs[activeSubs[i].GroupID] = struct{}{} + } + + now := time.Now() + anns, err := s.announcementRepo.ListActive(ctx, now) + if err != nil { + return nil, fmt.Errorf("list active announcements: %w", err) + } + + visible := make([]Announcement, 0, len(anns)) + ids := make([]int64, 0, len(anns)) + for i := range anns { + a := anns[i] + if !a.IsActiveAt(now) { + continue + } + if !a.Targeting.Matches(user.Balance, activeGroupIDs) { + continue + } + visible = append(visible, a) + ids = append(ids, a.ID) + } + + if len(visible) == 0 { + return []UserAnnouncement{}, nil + } + + readMap, err := s.readRepo.GetReadMapByUser(ctx, userID, ids) + if err != nil { + return nil, fmt.Errorf("get read map: %w", err) + } + + out := make([]UserAnnouncement, 0, len(visible)) + for i := range visible { + a := visible[i] + readAt, ok := readMap[a.ID] + if unreadOnly && ok { + continue + } + var ptr *time.Time + if ok { + t := readAt + ptr = &t + } + out = append(out, UserAnnouncement{ + Announcement: a, + ReadAt: ptr, + }) + } + + // 未读优先、同状态按创建时间倒序 + sort.Slice(out, func(i, j int) bool { + ai, aj := out[i], out[j] + if (ai.ReadAt == nil) != (aj.ReadAt == nil) { + return ai.ReadAt == nil + } + return ai.Announcement.ID > aj.Announcement.ID + }) + + return out, nil +} + +func (s *AnnouncementService) MarkRead(ctx context.Context, userID, announcementID int64) error { + // 安全:仅允许标记当前用户“可见”的公告 + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + a, err := s.announcementRepo.GetByID(ctx, announcementID) + if err != nil { + return err + } + + now := time.Now() + if !a.IsActiveAt(now) { + return ErrAnnouncementNotFound + } + + activeSubs, err := s.userSubRepo.ListActiveByUserID(ctx, userID) + if err != nil { + return fmt.Errorf("list active subscriptions: %w", err) + } + activeGroupIDs := make(map[int64]struct{}, len(activeSubs)) + for i := range activeSubs { + activeGroupIDs[activeSubs[i].GroupID] = struct{}{} + } + + if !a.Targeting.Matches(user.Balance, activeGroupIDs) { + return ErrAnnouncementNotFound + } + + if err := s.readRepo.MarkRead(ctx, announcementID, userID, now); err != nil { + return fmt.Errorf("mark read: %w", err) + } + return nil +} + +func (s *AnnouncementService) ListUserReadStatus( + ctx context.Context, + announcementID int64, + params pagination.PaginationParams, + search string, +) ([]AnnouncementUserReadStatus, *pagination.PaginationResult, error) { + ann, err := s.announcementRepo.GetByID(ctx, announcementID) + if err != nil { + return nil, nil, err + } + + filters := UserListFilters{ + Search: 
strings.TrimSpace(search), + } + + users, page, err := s.userRepo.ListWithFilters(ctx, params, filters) + if err != nil { + return nil, nil, fmt.Errorf("list users: %w", err) + } + + userIDs := make([]int64, 0, len(users)) + for i := range users { + userIDs = append(userIDs, users[i].ID) + } + + readMap, err := s.readRepo.GetReadMapByUsers(ctx, announcementID, userIDs) + if err != nil { + return nil, nil, fmt.Errorf("get read map: %w", err) + } + + out := make([]AnnouncementUserReadStatus, 0, len(users)) + for i := range users { + u := users[i] + subs, err := s.userSubRepo.ListActiveByUserID(ctx, u.ID) + if err != nil { + return nil, nil, fmt.Errorf("list active subscriptions: %w", err) + } + activeGroupIDs := make(map[int64]struct{}, len(subs)) + for j := range subs { + activeGroupIDs[subs[j].GroupID] = struct{}{} + } + + readAt, ok := readMap[u.ID] + var ptr *time.Time + if ok { + t := readAt + ptr = &t + } + + out = append(out, AnnouncementUserReadStatus{ + UserID: u.ID, + Email: u.Email, + Username: u.Username, + Balance: u.Balance, + Eligible: domain.AnnouncementTargeting(ann.Targeting).Matches(u.Balance, activeGroupIDs), + ReadAt: ptr, + }) + } + + return out, page, nil +} + +func isValidAnnouncementStatus(status string) bool { + switch status { + case AnnouncementStatusDraft, AnnouncementStatusActive, AnnouncementStatusArchived: + return true + default: + return false + } +} diff --git a/backend/internal/service/announcement_targeting_test.go b/backend/internal/service/announcement_targeting_test.go new file mode 100644 index 00000000..fffea26b --- /dev/null +++ b/backend/internal/service/announcement_targeting_test.go @@ -0,0 +1,67 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAnnouncementTargeting_Matches_EmptyMatchesAll(t *testing.T) { + var targeting AnnouncementTargeting + require.True(t, targeting.Matches(0, nil)) + require.True(t, targeting.Matches(123.45, map[int64]struct{}{1: {}})) +} + +func TestAnnouncementTargeting_NormalizeAndValidate_RejectsEmptyGroup(t *testing.T) { + targeting := AnnouncementTargeting{ + AnyOf: []AnnouncementConditionGroup{ + {AllOf: nil}, + }, + } + _, err := targeting.NormalizeAndValidate() + require.Error(t, err) + require.ErrorIs(t, err, ErrAnnouncementInvalidTarget) +} + +func TestAnnouncementTargeting_NormalizeAndValidate_RejectsInvalidCondition(t *testing.T) { + targeting := AnnouncementTargeting{ + AnyOf: []AnnouncementConditionGroup{ + { + AllOf: []AnnouncementCondition{ + {Type: "balance", Operator: "between", Value: 10}, + }, + }, + }, + } + _, err := targeting.NormalizeAndValidate() + require.Error(t, err) + require.ErrorIs(t, err, ErrAnnouncementInvalidTarget) +} + +func TestAnnouncementTargeting_Matches_AndOrSemantics(t *testing.T) { + targeting := AnnouncementTargeting{ + AnyOf: []AnnouncementConditionGroup{ + { + AllOf: []AnnouncementCondition{ + {Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorGTE, Value: 100}, + {Type: AnnouncementConditionTypeSubscription, Operator: AnnouncementOperatorIn, GroupIDs: []int64{10}}, + }, + }, + { + AllOf: []AnnouncementCondition{ + {Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorLT, Value: 5}, + }, + }, + }, + } + + // 命中第 2 组(balance < 5) + require.True(t, targeting.Matches(4.99, nil)) + require.False(t, targeting.Matches(5, nil)) + + // 命中第 1 组(balance >= 100 AND 订阅 in [10]) + require.False(t, targeting.Matches(100, map[int64]struct{}{})) + require.False(t, targeting.Matches(99.9, 
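Editorial note: UpdateAnnouncementInput uses **time.Time for the schedule fields to get three states out of one field — outer nil means "leave unchanged", outer set with inner nil means "clear", and both set means "overwrite". A self-contained sketch of the pattern (names here are hypothetical, not from the patch):

package main

import "time"

// Tri-state patch field: nil = keep, pointer-to-nil = clear, pointer-to-value = set.
func startsAtPatch(provided bool, value *time.Time) **time.Time {
	if !provided {
		return nil // field absent: keep the stored starts_at
	}
	return &value // field present: value == nil clears it, non-nil sets it
}

func main() {
	now := time.Now()
	_ = startsAtPatch(false, nil) // leave unchanged
	_ = startsAtPatch(true, nil)  // clear starts_at
	_ = startsAtPatch(true, &now) // set starts_at to now
}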
diff --git a/backend/internal/service/announcement_targeting_test.go b/backend/internal/service/announcement_targeting_test.go
new file mode 100644
index 00000000..fffea26b
--- /dev/null
+++ b/backend/internal/service/announcement_targeting_test.go
@@ -0,0 +1,67 @@
+package service
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestAnnouncementTargeting_Matches_EmptyMatchesAll(t *testing.T) {
+	var targeting AnnouncementTargeting
+	require.True(t, targeting.Matches(0, nil))
+	require.True(t, targeting.Matches(123.45, map[int64]struct{}{1: {}}))
+}
+
+func TestAnnouncementTargeting_NormalizeAndValidate_RejectsEmptyGroup(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{AllOf: nil},
+		},
+	}
+	_, err := targeting.NormalizeAndValidate()
+	require.Error(t, err)
+	require.ErrorIs(t, err, ErrAnnouncementInvalidTarget)
+}
+
+func TestAnnouncementTargeting_NormalizeAndValidate_RejectsInvalidCondition(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: "balance", Operator: "between", Value: 10},
+				},
+			},
+		},
+	}
+	_, err := targeting.NormalizeAndValidate()
+	require.Error(t, err)
+	require.ErrorIs(t, err, ErrAnnouncementInvalidTarget)
+}
+
+func TestAnnouncementTargeting_Matches_AndOrSemantics(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorGTE, Value: 100},
+					{Type: AnnouncementConditionTypeSubscription, Operator: AnnouncementOperatorIn, GroupIDs: []int64{10}},
+				},
+			},
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorLT, Value: 5},
+				},
+			},
+		},
+	}
+
+	// Matches group 2 (balance < 5)
+	require.True(t, targeting.Matches(4.99, nil))
+	require.False(t, targeting.Matches(5, nil))
+
+	// Matches group 1 (balance >= 100 AND subscription in [10])
+	require.False(t, targeting.Matches(100, map[int64]struct{}{}))
+	require.False(t, targeting.Matches(99.9, map[int64]struct{}{10: {}}))
+	require.True(t, targeting.Matches(100, map[int64]struct{}{10: {}}))
+}
+
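Editorial note: the tests pin the rule shape — any_of is OR across groups, all_of is AND within a group, and empty targeting matches everyone. The domain implementation is outside this series; the sketch below is one way to satisfy those tests, with field names (Value, GroupIDs) taken from the frontend types later in this patch. Treat it as an assumption, not the shipped code:

// Sketch only: OR over groups, AND within a group, empty matches all.
func (t AnnouncementTargeting) Matches(balance float64, groups map[int64]struct{}) bool {
	if len(t.AnyOf) == 0 {
		return true
	}
	for _, g := range t.AnyOf {
		ok := len(g.AllOf) > 0
		for _, c := range g.AllOf {
			if !conditionMatches(c, balance, groups) {
				ok = false
				break
			}
		}
		if ok {
			return true
		}
	}
	return false
}

func conditionMatches(c AnnouncementCondition, balance float64, groups map[int64]struct{}) bool {
	switch c.Type {
	case AnnouncementConditionTypeSubscription:
		// "in": user holds at least one of the listed subscription groups.
		for _, id := range c.GroupIDs {
			if _, ok := groups[id]; ok {
				return true
			}
		}
		return false
	case AnnouncementConditionTypeBalance:
		switch c.Operator {
		case AnnouncementOperatorGT:
			return balance > c.Value
		case AnnouncementOperatorGTE:
			return balance >= c.Value
		case AnnouncementOperatorLT:
			return balance < c.Value
		case AnnouncementOperatorLTE:
			return balance <= c.Value
		case AnnouncementOperatorEQ:
			return balance == c.Value
		}
	}
	return false
}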
diff --git a/backend/internal/service/domain_constants.go b/backend/internal/service/domain_constants.go
index 44df9073..eee8bddd 100644
--- a/backend/internal/service/domain_constants.go
+++ b/backend/internal/service/domain_constants.go
@@ -1,66 +1,68 @@
 package service
 
+import "github.com/Wei-Shaw/sub2api/internal/domain"
+
 // Status constants
 const (
-	StatusActive   = "active"
-	StatusDisabled = "disabled"
-	StatusError    = "error"
-	StatusUnused   = "unused"
-	StatusUsed     = "used"
-	StatusExpired  = "expired"
+	StatusActive   = domain.StatusActive
+	StatusDisabled = domain.StatusDisabled
+	StatusError    = domain.StatusError
+	StatusUnused   = domain.StatusUnused
+	StatusUsed     = domain.StatusUsed
+	StatusExpired  = domain.StatusExpired
 )
 
 // Role constants
 const (
-	RoleAdmin = "admin"
-	RoleUser  = "user"
+	RoleAdmin = domain.RoleAdmin
+	RoleUser  = domain.RoleUser
 )
 
 // Platform constants
 const (
-	PlatformAnthropic   = "anthropic"
-	PlatformOpenAI      = "openai"
-	PlatformGemini      = "gemini"
-	PlatformAntigravity = "antigravity"
+	PlatformAnthropic   = domain.PlatformAnthropic
+	PlatformOpenAI      = domain.PlatformOpenAI
+	PlatformGemini      = domain.PlatformGemini
+	PlatformAntigravity = domain.PlatformAntigravity
 )
 
 // Account type constants
 const (
-	AccountTypeOAuth      = "oauth"       // OAuth类型账号(full scope: profile + inference)
-	AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope)
-	AccountTypeAPIKey     = "apikey"      // API Key类型账号
+	AccountTypeOAuth      = domain.AccountTypeOAuth      // OAuth类型账号(full scope: profile + inference)
+	AccountTypeSetupToken = domain.AccountTypeSetupToken // Setup Token类型账号(inference only scope)
+	AccountTypeAPIKey     = domain.AccountTypeAPIKey     // API Key类型账号
 )
 
 // Redeem type constants
 const (
-	RedeemTypeBalance      = "balance"
-	RedeemTypeConcurrency  = "concurrency"
-	RedeemTypeSubscription = "subscription"
+	RedeemTypeBalance      = domain.RedeemTypeBalance
+	RedeemTypeConcurrency  = domain.RedeemTypeConcurrency
+	RedeemTypeSubscription = domain.RedeemTypeSubscription
 )
 
 // PromoCode status constants
 const (
-	PromoCodeStatusActive   = "active"
-	PromoCodeStatusDisabled = "disabled"
+	PromoCodeStatusActive   = domain.PromoCodeStatusActive
+	PromoCodeStatusDisabled = domain.PromoCodeStatusDisabled
 )
 
 // Admin adjustment type constants
 const (
-	AdjustmentTypeAdminBalance     = "admin_balance"     // 管理员调整余额
-	AdjustmentTypeAdminConcurrency = "admin_concurrency" // 管理员调整并发数
+	AdjustmentTypeAdminBalance     = domain.AdjustmentTypeAdminBalance     // 管理员调整余额
+	AdjustmentTypeAdminConcurrency = domain.AdjustmentTypeAdminConcurrency // 管理员调整并发数
 )
 
 // Group subscription type constants
 const (
-	SubscriptionTypeStandard     = "standard"     // 标准计费模式(按余额扣费)
-	SubscriptionTypeSubscription = "subscription" // 订阅模式(按限额控制)
+	SubscriptionTypeStandard     = domain.SubscriptionTypeStandard     // 标准计费模式(按余额扣费)
+	SubscriptionTypeSubscription = domain.SubscriptionTypeSubscription // 订阅模式(按限额控制)
 )
 
 // Subscription status constants
 const (
-	SubscriptionStatusActive    = "active"
-	SubscriptionStatusExpired   = "expired"
-	SubscriptionStatusSuspended = "suspended"
+	SubscriptionStatusActive    = domain.SubscriptionStatusActive
+	SubscriptionStatusExpired   = domain.SubscriptionStatusExpired
+	SubscriptionStatusSuspended = domain.SubscriptionStatusSuspended
 )
 
 // LinuxDoConnectSyntheticEmailDomain 是 LinuxDo Connect 用户的合成邮箱后缀(RFC 保留域名)。
diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go
index df86b2e7..096e15a0 100644
--- a/backend/internal/service/wire.go
+++ b/backend/internal/service/wire.go
@@ -226,6 +226,7 @@ var ProviderSet = wire.NewSet(
 	ProvidePricingService,
 	NewBillingService,
 	NewBillingCacheService,
+	NewAnnouncementService,
 	NewAdminService,
 	NewGatewayService,
 	NewOpenAIGatewayService,
diff --git a/backend/migrations/045_add_announcements.sql b/backend/migrations/045_add_announcements.sql
new file mode 100644
index 00000000..cfb9b4b5
--- /dev/null
+++ b/backend/migrations/045_add_announcements.sql
@@ -0,0 +1,44 @@
+-- Announcements table
+CREATE TABLE IF NOT EXISTS announcements (
+    id BIGSERIAL PRIMARY KEY,
+    title VARCHAR(200) NOT NULL,
+    content TEXT NOT NULL,
+    status VARCHAR(20) NOT NULL DEFAULT 'draft',
+    targeting JSONB NOT NULL DEFAULT '{}'::jsonb,
+    starts_at TIMESTAMPTZ DEFAULT NULL,
+    ends_at TIMESTAMPTZ DEFAULT NULL,
+    created_by BIGINT DEFAULT NULL REFERENCES users(id) ON DELETE SET NULL,
+    updated_by BIGINT DEFAULT NULL REFERENCES users(id) ON DELETE SET NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+-- Announcement read receipts
+CREATE TABLE IF NOT EXISTS announcement_reads (
+    id BIGSERIAL PRIMARY KEY,
+    announcement_id BIGINT NOT NULL REFERENCES announcements(id) ON DELETE CASCADE,
+    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    read_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    UNIQUE(announcement_id, user_id)
+);
+
+-- Indexes
+CREATE INDEX IF NOT EXISTS idx_announcements_status ON announcements(status);
+CREATE INDEX IF NOT EXISTS idx_announcements_starts_at ON announcements(starts_at);
+CREATE INDEX IF NOT EXISTS idx_announcements_ends_at ON announcements(ends_at);
+CREATE INDEX IF NOT EXISTS idx_announcements_created_at ON announcements(created_at);
+
+CREATE INDEX IF NOT EXISTS idx_announcement_reads_announcement_id ON announcement_reads(announcement_id);
+CREATE INDEX IF NOT EXISTS idx_announcement_reads_user_id ON announcement_reads(user_id);
+CREATE INDEX IF NOT EXISTS idx_announcement_reads_read_at ON announcement_reads(read_at);
+
+COMMENT ON TABLE announcements IS '系统公告';
+COMMENT ON COLUMN announcements.status IS '状态: draft, active, archived';
+COMMENT ON COLUMN announcements.targeting IS '展示条件(JSON 规则)';
+COMMENT ON COLUMN announcements.starts_at IS '开始展示时间(为空表示立即生效)';
+COMMENT ON COLUMN announcements.ends_at IS '结束展示时间(为空表示永久生效)';
+
+COMMENT ON TABLE announcement_reads IS '公告已读记录';
+COMMENT ON COLUMN announcement_reads.read_at IS '用户首次已读时间';
+
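Editorial note: because targeting is a JSONB column, a rule is stored as nested any_of/all_of JSON. A runnable illustration of the storage shape, using local stand-in types that mirror the TS definitions added below (the real domain structs may differ):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for the targeting rule types; JSON tags match the frontend shapes.
type condition struct {
	Type     string  `json:"type"`
	Operator string  `json:"operator"`
	GroupIDs []int64 `json:"group_ids,omitempty"`
	Value    float64 `json:"value,omitempty"`
}

type conditionGroup struct {
	AllOf []condition `json:"all_of,omitempty"`
}

type targeting struct {
	AnyOf []conditionGroup `json:"any_of,omitempty"`
}

func main() {
	t := targeting{AnyOf: []conditionGroup{{AllOf: []condition{
		{Type: "balance", Operator: "gte", Value: 100},
		{Type: "subscription", Operator: "in", GroupIDs: []int64{10}},
	}}}}
	b, _ := json.Marshal(t)
	fmt.Println(string(b))
	// {"any_of":[{"all_of":[{"type":"balance","operator":"gte","value":100},{"type":"subscription","operator":"in","group_ids":[10]}]}]}
}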
data +} + +export async function update(id: number, request: UpdateAnnouncementRequest): Promise { + const { data } = await apiClient.put(`/admin/announcements/${id}`, request) + return data +} + +export async function deleteAnnouncement(id: number): Promise<{ message: string }> { + const { data } = await apiClient.delete<{ message: string }>(`/admin/announcements/${id}`) + return data +} + +export async function getReadStatus( + id: number, + page: number = 1, + pageSize: number = 20, + search: string = '' +): Promise> { + const { data } = await apiClient.get>( + `/admin/announcements/${id}/read-status`, + { params: { page, page_size: pageSize, search } } + ) + return data +} + +const announcementsAPI = { + list, + getById, + create, + update, + delete: deleteAnnouncement, + getReadStatus +} + +export default announcementsAPI + diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index e86f6348..a88b02c6 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -10,6 +10,7 @@ import accountsAPI from './accounts' import proxiesAPI from './proxies' import redeemAPI from './redeem' import promoAPI from './promo' +import announcementsAPI from './announcements' import settingsAPI from './settings' import systemAPI from './system' import subscriptionsAPI from './subscriptions' @@ -30,6 +31,7 @@ export const adminAPI = { proxies: proxiesAPI, redeem: redeemAPI, promo: promoAPI, + announcements: announcementsAPI, settings: settingsAPI, system: systemAPI, subscriptions: subscriptionsAPI, @@ -48,6 +50,7 @@ export { proxiesAPI, redeemAPI, promoAPI, + announcementsAPI, settingsAPI, systemAPI, subscriptionsAPI, diff --git a/frontend/src/api/announcements.ts b/frontend/src/api/announcements.ts new file mode 100644 index 00000000..a9034e2a --- /dev/null +++ b/frontend/src/api/announcements.ts @@ -0,0 +1,26 @@ +/** + * User Announcements API endpoints + */ + +import { apiClient } from './client' +import type { UserAnnouncement } from '@/types' + +export async function list(unreadOnly: boolean = false): Promise { + const { data } = await apiClient.get('/announcements', { + params: unreadOnly ? 
{ unread_only: 1 } : {} + }) + return data +} + +export async function markRead(id: number): Promise<{ message: string }> { + const { data } = await apiClient.post<{ message: string }>(`/announcements/${id}/read`) + return data +} + +const announcementsAPI = { + list, + markRead +} + +export default announcementsAPI + diff --git a/frontend/src/api/index.ts b/frontend/src/api/index.ts index 347d0b94..070ce648 100644 --- a/frontend/src/api/index.ts +++ b/frontend/src/api/index.ts @@ -16,6 +16,7 @@ export { userAPI } from './user' export { redeemAPI, type RedeemHistoryItem } from './redeem' export { userGroupsAPI } from './groups' export { totpAPI } from './totp' +export { default as announcementsAPI } from './announcements' // Admin APIs export { adminAPI } from './admin' diff --git a/frontend/src/components/admin/announcements/AnnouncementReadStatusDialog.vue b/frontend/src/components/admin/announcements/AnnouncementReadStatusDialog.vue new file mode 100644 index 00000000..e7d991a8 --- /dev/null +++ b/frontend/src/components/admin/announcements/AnnouncementReadStatusDialog.vue @@ -0,0 +1,186 @@ + + + diff --git a/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue b/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue new file mode 100644 index 00000000..bd90af42 --- /dev/null +++ b/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue @@ -0,0 +1,388 @@ + + + diff --git a/frontend/src/components/layout/AppSidebar.vue b/frontend/src/components/layout/AppSidebar.vue index 474e4390..c685a2fa 100644 --- a/frontend/src/components/layout/AppSidebar.vue +++ b/frontend/src/components/layout/AppSidebar.vue @@ -319,6 +319,21 @@ const ServerIcon = { ) } +const BellIcon = { + render: () => + h( + 'svg', + { fill: 'none', viewBox: '0 0 24 24', stroke: 'currentColor', 'stroke-width': '1.5' }, + [ + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M14.857 17.082a23.848 23.848 0 005.454-1.31A8.967 8.967 0 0118 9.75V9a6 6 0 10-12 0v.75a8.967 8.967 0 01-2.312 6.022c1.733.64 3.56 1.085 5.455 1.31m5.714 0a24.255 24.255 0 01-5.714 0m5.714 0a3 3 0 11-5.714 0' + }) + ] + ) +} + const TicketIcon = { render: () => h( @@ -418,6 +433,7 @@ const ChevronDoubleRightIcon = { const userNavItems = computed(() => { const items = [ { path: '/dashboard', label: t('nav.dashboard'), icon: DashboardIcon }, + { path: '/announcements', label: t('nav.announcements'), icon: BellIcon }, { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, @@ -440,6 +456,7 @@ const userNavItems = computed(() => { // Personal navigation items (for admin's "My Account" section, without Dashboard) const personalNavItems = computed(() => { const items = [ + { path: '/announcements', label: t('nav.announcements'), icon: BellIcon }, { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, @@ -470,6 +487,7 @@ const adminNavItems = computed(() => { { path: '/admin/groups', label: t('nav.groups'), icon: FolderIcon, hideInSimpleMode: true }, { path: '/admin/subscriptions', label: t('nav.subscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, { path: '/admin/accounts', label: 
@@ -470,6 +487,7 @@ const adminNavItems = computed(() => {
     { path: '/admin/groups', label: t('nav.groups'), icon: FolderIcon, hideInSimpleMode: true },
     { path: '/admin/subscriptions', label: t('nav.subscriptions'), icon: CreditCardIcon, hideInSimpleMode: true },
     { path: '/admin/accounts', label: t('nav.accounts'), icon: GlobeIcon },
+    { path: '/admin/announcements', label: t('nav.announcements'), icon: BellIcon },
     { path: '/admin/proxies', label: t('nav.proxies'), icon: ServerIcon },
     { path: '/admin/redeem', label: t('nav.redeemCodes'), icon: TicketIcon, hideInSimpleMode: true },
     { path: '/admin/promo-codes', label: t('nav.promoCodes'), icon: GiftIcon, hideInSimpleMode: true },
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts
index dc93d37c..3ceaa063 100644
--- a/frontend/src/i18n/locales/en.ts
+++ b/frontend/src/i18n/locales/en.ts
@@ -185,6 +185,7 @@ export default {
   // Navigation
   nav: {
     dashboard: 'Dashboard',
+    announcements: 'Announcements',
     apiKeys: 'API Keys',
     usage: 'Usage',
     redeem: 'Redeem',
@@ -1951,6 +1952,73 @@ export default {
     }
   },
 
+  // Announcements
+  announcements: {
+    title: 'Announcements',
+    description: 'Create announcements and target by conditions',
+    createAnnouncement: 'Create Announcement',
+    editAnnouncement: 'Edit Announcement',
+    deleteAnnouncement: 'Delete Announcement',
+    searchAnnouncements: 'Search announcements...',
+    status: 'Status',
+    allStatus: 'All Status',
+    columns: {
+      title: 'Title',
+      status: 'Status',
+      targeting: 'Targeting',
+      timeRange: 'Schedule',
+      createdAt: 'Created At',
+      actions: 'Actions'
+    },
+    statusLabels: {
+      draft: 'Draft',
+      active: 'Active',
+      archived: 'Archived'
+    },
+    form: {
+      title: 'Title',
+      content: 'Content (Markdown supported)',
+      status: 'Status',
+      startsAt: 'Starts At',
+      endsAt: 'Ends At',
+      startsAtHint: 'Leave empty to start immediately',
+      endsAtHint: 'Leave empty to never expire',
+      targetingMode: 'Targeting',
+      targetingAll: 'All users',
+      targetingCustom: 'Custom rules',
+      addOrGroup: 'Add OR group',
+      addAndCondition: 'Add AND condition',
+      conditionType: 'Condition type',
+      conditionSubscription: 'Subscription',
+      conditionBalance: 'Balance',
+      operator: 'Operator',
+      balanceValue: 'Balance threshold',
+      selectPackages: 'Select packages'
+    },
+    operators: {
+      gt: '>',
+      gte: '≥',
+      lt: '<',
+      lte: '≤',
+      eq: '='
+    },
+    targetingSummaryAll: 'All users',
+    targetingSummaryCustom: 'Custom ({groups} groups)',
+    timeImmediate: 'Immediate',
+    timeNever: 'Never',
+    readStatus: 'Read Status',
+    eligible: 'Eligible',
+    readAt: 'Read at',
+    unread: 'Unread',
+    searchUsers: 'Search users...',
+    failedToLoad: 'Failed to load announcements',
+    failedToCreate: 'Failed to create announcement',
+    failedToUpdate: 'Failed to update announcement',
+    failedToDelete: 'Failed to delete announcement',
+    failedToLoadReadStatus: 'Failed to load read status',
+    deleteConfirm: 'Are you sure you want to delete this announcement? This action cannot be undone.'
+  },
+
   // Promo Codes
   promo: {
     title: 'Promo Code Management',
@@ -3063,6 +3131,21 @@ export default {
       'The administrator enabled the entry but has not configured a purchase URL. Please contact admin.'
   },
 
+  // Announcements Page
+  announcements: {
+    title: 'Announcements',
+    description: 'View system announcements',
+    unreadOnly: 'Show unread only',
+    markRead: 'Mark as read',
+    readAt: 'Read at',
+    read: 'Read',
+    unread: 'Unread',
+    startsAt: 'Starts at',
+    endsAt: 'Ends at',
+    empty: 'No announcements',
+    emptyUnread: 'No unread announcements'
+  },
+
   // User Subscriptions Page
   userSubscriptions: {
     title: 'My Subscriptions',
diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts
index 4b6a9be6..0b456624 100644
--- a/frontend/src/i18n/locales/zh.ts
+++ b/frontend/src/i18n/locales/zh.ts
@@ -182,6 +182,7 @@ export default {
   // Navigation
   nav: {
     dashboard: '仪表盘',
+    announcements: '公告',
    apiKeys: 'API 密钥',
     usage: '使用记录',
     redeem: '兑换',
@@ -2098,6 +2099,73 @@ export default {
     failedToDelete: '删除兑换码失败'
   },
 
+  // Announcements
+  announcements: {
+    title: '公告管理',
+    description: '创建公告并按条件投放',
+    createAnnouncement: '创建公告',
+    editAnnouncement: '编辑公告',
+    deleteAnnouncement: '删除公告',
+    searchAnnouncements: '搜索公告...',
+    status: '状态',
+    allStatus: '全部状态',
+    columns: {
+      title: '标题',
+      status: '状态',
+      targeting: '展示条件',
+      timeRange: '有效期',
+      createdAt: '创建时间',
+      actions: '操作'
+    },
+    statusLabels: {
+      draft: '草稿',
+      active: '展示中',
+      archived: '已归档'
+    },
+    form: {
+      title: '标题',
+      content: '内容(支持 Markdown)',
+      status: '状态',
+      startsAt: '开始时间',
+      endsAt: '结束时间',
+      startsAtHint: '留空表示立即生效',
+      endsAtHint: '留空表示永久生效',
+      targetingMode: '展示条件',
+      targetingAll: '所有用户',
+      targetingCustom: '按条件',
+      addOrGroup: '添加 OR 条件组',
+      addAndCondition: '添加 AND 条件',
+      conditionType: '条件类型',
+      conditionSubscription: '订阅套餐',
+      conditionBalance: '余额',
+      operator: '运算符',
+      balanceValue: '余额阈值',
+      selectPackages: '选择套餐'
+    },
+    operators: {
+      gt: '>',
+      gte: '≥',
+      lt: '<',
+      lte: '≤',
+      eq: '='
+    },
+    targetingSummaryAll: '全部用户',
+    targetingSummaryCustom: '自定义({groups} 组)',
+    timeImmediate: '立即',
+    timeNever: '永久',
+    readStatus: '已读情况',
+    eligible: '符合条件',
+    readAt: '已读时间',
+    unread: '未读',
+    searchUsers: '搜索用户...',
+    failedToLoad: '加载公告失败',
+    failedToCreate: '创建公告失败',
+    failedToUpdate: '更新公告失败',
+    failedToDelete: '删除公告失败',
+    failedToLoadReadStatus: '加载已读情况失败',
+    deleteConfirm: '确定要删除该公告吗?此操作无法撤销。'
+  },
+
   // Promo Codes
   promo: {
     title: '优惠码管理',
@@ -3212,6 +3280,21 @@ export default {
     notConfiguredDesc: '管理员已开启入口,但尚未配置购买订阅链接,请联系管理员。'
   },
 
+  // Announcements Page
+  announcements: {
+    title: '公告',
+    description: '查看系统公告',
+    unreadOnly: '仅显示未读',
+    markRead: '标记已读',
+    readAt: '已读时间',
+    read: '已读',
+    unread: '未读',
+    startsAt: '开始时间',
+    endsAt: '结束时间',
+    empty: '暂无公告',
+    emptyUnread: '暂无未读公告'
+  },
+
   // User Subscriptions Page
   userSubscriptions: {
     title: '我的订阅',
diff --git a/frontend/src/router/index.ts b/frontend/src/router/index.ts
index a8ddc67f..4e2c1147 100644
--- a/frontend/src/router/index.ts
+++ b/frontend/src/router/index.ts
@@ -187,6 +187,18 @@ const routes: RouteRecordRaw[] = [
       descriptionKey: 'purchase.description'
     }
   },
+  {
+    path: '/announcements',
+    name: 'Announcements',
+    component: () => import('@/views/user/AnnouncementsView.vue'),
+    meta: {
+      requiresAuth: true,
+      requiresAdmin: false,
+      title: 'Announcements',
+      titleKey: 'announcements.title',
+      descriptionKey: 'announcements.description'
+    }
+  },
 
   // ==================== Admin Routes ====================
   {
@@ -265,6 +277,18 @@ const routes: RouteRecordRaw[] = [
       descriptionKey: 'admin.accounts.description'
     }
   },
+  {
+    path: '/admin/announcements',
+    name: 'AdminAnnouncements',
+    component: () => import('@/views/admin/AnnouncementsView.vue'),
+    meta: {
+      requiresAuth: true,
+      requiresAdmin: true,
+      title: 'Announcements',
+      titleKey: 'admin.announcements.title',
+      descriptionKey: 'admin.announcements.description'
+    }
+  },
   {
     path: '/admin/proxies',
     name: 'AdminProxies',
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts
index 6f3b972e..9802d5c8 100644
--- a/frontend/src/types/index.ts
+++ b/frontend/src/types/index.ts
@@ -129,6 +129,81 @@ export interface UpdateSubscriptionRequest {
   is_active?: boolean
 }
 
+// ==================== Announcement Types ====================
+
+export type AnnouncementStatus = 'draft' | 'active' | 'archived'
+
+export type AnnouncementConditionType = 'subscription' | 'balance'
+
+export type AnnouncementOperator = 'in' | 'gt' | 'gte' | 'lt' | 'lte' | 'eq'
+
+export interface AnnouncementCondition {
+  type: AnnouncementConditionType
+  operator: AnnouncementOperator
+  group_ids?: number[]
+  value?: number
+}
+
+export interface AnnouncementConditionGroup {
+  all_of?: AnnouncementCondition[]
+}
+
+export interface AnnouncementTargeting {
+  any_of?: AnnouncementConditionGroup[]
+}
+
+export interface Announcement {
+  id: number
+  title: string
+  content: string
+  status: AnnouncementStatus
+  targeting: AnnouncementTargeting
+  starts_at?: string
+  ends_at?: string
+  created_by?: number
+  updated_by?: number
+  created_at: string
+  updated_at: string
+}
+
+export interface UserAnnouncement {
+  id: number
+  title: string
+  content: string
+  starts_at?: string
+  ends_at?: string
+  read_at?: string
+  created_at: string
+  updated_at: string
+}
+
+export interface CreateAnnouncementRequest {
+  title: string
+  content: string
+  status?: AnnouncementStatus
+  targeting: AnnouncementTargeting
+  starts_at?: number
+  ends_at?: number
+}
+
+export interface UpdateAnnouncementRequest {
+  title?: string
+  content?: string
+  status?: AnnouncementStatus
+  targeting?: AnnouncementTargeting
+  starts_at?: number
+  ends_at?: number
+}
+
+export interface AnnouncementUserReadStatus {
+  user_id: number
+  email: string
+  username: string
+  balance: number
+  eligible: boolean
+  read_at?: string
+}
+
 // ==================== Proxy Node Types ====================
 
 export interface ProxyNode {
diff --git a/frontend/src/views/admin/AnnouncementsView.vue b/frontend/src/views/admin/AnnouncementsView.vue
new file mode 100644
index 00000000..38574454
--- /dev/null
+++ b/frontend/src/views/admin/AnnouncementsView.vue
@@ -0,0 +1,538 @@
+
+
+
diff --git a/frontend/src/views/user/AnnouncementsView.vue b/frontend/src/views/user/AnnouncementsView.vue
new file mode 100644
index 00000000..99ea253e
--- /dev/null
+++ b/frontend/src/views/user/AnnouncementsView.vue
@@ -0,0 +1,140 @@
+
+
+
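Editorial note: one wire-format detail the TS types above encode — create/update requests send starts_at and ends_at as unix seconds (number), while responses carry RFC 3339 strings. A hedged sketch of the request payload, mirroring the backend's CreateAnnouncementRequest (Targeting omitted for brevity; example values invented):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Mirrors the admin create request; schedule fields travel as unix seconds.
type createAnnouncementReq struct {
	Title    string `json:"title"`
	Content  string `json:"content"`
	Status   string `json:"status,omitempty"`
	StartsAt *int64 `json:"starts_at,omitempty"` // nil = effective immediately
	EndsAt   *int64 `json:"ends_at,omitempty"`   // nil = never expires
}

func main() {
	starts := time.Now().Unix()
	b, _ := json.Marshal(createAnnouncementReq{
		Title:    "Maintenance window",
		Content:  "**Heads up:** upgrades tonight.",
		Status:   "active",
		StartsAt: &starts,
	})
	fmt.Println(string(b))
}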
From 9bee0a20713322adca3c7a0bf3d53b9b338cba74 Mon Sep 17 00:00:00 2001
From: ducky
Date: Fri, 30 Jan 2026 17:28:53 +0800
Subject: [PATCH 061/214] chore: gofmt for golangci-lint

---
 backend/ent/schema/announcement.go                     |  1 -
 backend/ent/schema/announcement_read.go                |  1 -
 .../internal/handler/admin/announcement_handler.go     | 11 +++++------
 backend/internal/handler/announcement_handler.go       |  1 -
 backend/internal/handler/dto/announcement.go           |  1 -
 backend/internal/repository/announcement_read_repo.go  |  1 -
 backend/internal/repository/announcement_repo.go       |  1 -
 .../internal/service/announcement_targeting_test.go    |  1 -
 8 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/backend/ent/schema/announcement.go b/backend/ent/schema/announcement.go
index 3b534831..1568778f 100644
--- a/backend/ent/schema/announcement.go
+++ b/backend/ent/schema/announcement.go
@@ -88,4 +88,3 @@ func (Announcement) Indexes() []ent.Index {
 		index.Fields("ends_at"),
 	}
 }
-
diff --git a/backend/ent/schema/announcement_read.go b/backend/ent/schema/announcement_read.go
index 2f80d8b2..e0b50777 100644
--- a/backend/ent/schema/announcement_read.go
+++ b/backend/ent/schema/announcement_read.go
@@ -63,4 +63,3 @@ func (AnnouncementRead) Indexes() []ent.Index {
 		index.Fields("announcement_id", "user_id").Unique(),
 	}
 }
-
diff --git a/backend/internal/handler/admin/announcement_handler.go b/backend/internal/handler/admin/announcement_handler.go
index a4e9f2f0..0b5d0fbc 100644
--- a/backend/internal/handler/admin/announcement_handler.go
+++ b/backend/internal/handler/admin/announcement_handler.go
@@ -27,12 +27,12 @@ func NewAnnouncementHandler(announcementService *service.AnnouncementService) *A
 }
 
 type CreateAnnouncementRequest struct {
-	Title   string `json:"title" binding:"required"`
-	Content string `json:"content" binding:"required"`
-	Status  string `json:"status" binding:"omitempty,oneof=draft active archived"`
+	Title     string                        `json:"title" binding:"required"`
+	Content   string                        `json:"content" binding:"required"`
+	Status    string                        `json:"status" binding:"omitempty,oneof=draft active archived"`
 	Targeting service.AnnouncementTargeting `json:"targeting"`
-	StartsAt *int64 `json:"starts_at"` // Unix seconds, 0/empty = immediate
-	EndsAt   *int64 `json:"ends_at"`   // Unix seconds, 0/empty = never
+	StartsAt  *int64                        `json:"starts_at"` // Unix seconds, 0/empty = immediate
+	EndsAt    *int64                        `json:"ends_at"`   // Unix seconds, 0/empty = never
 }
 
 type UpdateAnnouncementRequest struct {
@@ -244,4 +244,3 @@ func (h *AnnouncementHandler) ListReadStatus(c *gin.Context) {
 
 	response.Paginated(c, items, paginationResult.Total, page, pageSize)
 }
-
diff --git a/backend/internal/handler/announcement_handler.go b/backend/internal/handler/announcement_handler.go
index 1e1424eb..72823eaf 100644
--- a/backend/internal/handler/announcement_handler.go
+++ b/backend/internal/handler/announcement_handler.go
@@ -79,4 +79,3 @@ func parseBoolQuery(v string) bool {
 		return false
 	}
 }
-
diff --git a/backend/internal/handler/dto/announcement.go b/backend/internal/handler/dto/announcement.go
index ec2a8ca7..bc0db1b2 100644
--- a/backend/internal/handler/dto/announcement.go
+++ b/backend/internal/handler/dto/announcement.go
@@ -72,4 +72,3 @@ func UserAnnouncementFromService(a *service.UserAnnouncement) *UserAnnouncement
 		UpdatedAt: a.Announcement.UpdatedAt,
 	}
 }
-
diff --git a/backend/internal/repository/announcement_read_repo.go b/backend/internal/repository/announcement_read_repo.go
index 1c6b480a..2dc346b1 100644
--- a/backend/internal/repository/announcement_read_repo.go
+++ b/backend/internal/repository/announcement_read_repo.go
@@ -81,4 +81,3 @@ func (r *announcementReadRepository) CountByAnnouncementID(ctx context.Context,
 	}
 	return int64(count), nil
 }
-
diff --git a/backend/internal/repository/announcement_repo.go b/backend/internal/repository/announcement_repo.go
index edeb82e6..52029e4e 100644
--- a/backend/internal/repository/announcement_repo.go
+++ b/backend/internal/repository/announcement_repo.go
@@ -192,4 +192,3 @@ func announcementEntitiesToService(models []*dbent.Announcement) []service.Annou
 	}
 	return out
 }
-
diff --git a/backend/internal/service/announcement_targeting_test.go b/backend/internal/service/announcement_targeting_test.go
index fffea26b..4d904c7d 100644
--- a/backend/internal/service/announcement_targeting_test.go
+++ b/backend/internal/service/announcement_targeting_test.go
@@ -64,4 +64,3 @@ func TestAnnouncementTargeting_Matches_AndOrSemantics(t *testing.T) {
 	require.False(t, targeting.Matches(99.9, map[int64]struct{}{10: {}}))
 	require.True(t, targeting.Matches(100, map[int64]struct{}{10: {}}))
 }
-

From fe17058700a2664597ef02b27e6995e35a7804bc Mon Sep 17 00:00:00 2001
From: cyhhao
Date: Sat, 31 Jan 2026 01:40:38 +0800
Subject: [PATCH 062/214] refactor: limit OpenCode keyword replacement to tool
 descriptions

---
 .../internal/service/gateway_sanitize_test.go |  6 ++---
 backend/internal/service/gateway_service.go   | 22 +++++++++++++++----
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go
index 3b0a07c9..a70c1a00 100644
--- a/backend/internal/service/gateway_sanitize_test.go
+++ b/backend/internal/service/gateway_sanitize_test.go
@@ -9,12 +9,12 @@ import (
 
 func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) {
 	in := "You are OpenCode, the best coding agent on the planet."
-	got := sanitizeOpenCodeText(in)
+	got := sanitizeSystemText(in)
 	require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got)
 }
 
-func TestSanitizeOpenCodeText_RewritesOpenCodeKeywords(t *testing.T) {
+func TestSanitizeToolText_RewritesOpenCodeKeywords(t *testing.T) {
 	in := "OpenCode and opencode are mentioned."
-	got := sanitizeOpenCodeText(in)
+	got := sanitizeToolText(in)
 	require.Equal(t, "Claude Code and Claude are mentioned.", got)
 }
diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go
index 47ea8593..40354e48 100644
--- a/backend/internal/service/gateway_service.go
+++ b/backend/internal/service/gateway_service.go
@@ -697,7 +697,10 @@ func normalizeParamNameForOpenCode(name string, cache map[string]string) string
 	return name
 }
 
-func sanitizeOpenCodeText(text string) string {
+// sanitizeSystemText rewrites only the fixed OpenCode identity sentence (if present).
+// We intentionally avoid broad keyword replacement in system prompts to prevent
+// accidentally changing user-provided instructions.
+func sanitizeSystemText(text string) string {
 	if text == "" {
 		return text
 	}
@@ -709,6 +712,17 @@ func sanitizeSystemText(text string) string {
 		"You are OpenCode, the best coding agent on the planet.",
 		strings.TrimSpace(claudeCodeSystemPrompt),
 	)
+	return text
+}
+
+// sanitizeToolText is intentionally more aggressive than sanitizeSystemText because
+// tool descriptions are not user chat content, and some upstreams may flag "opencode"
+// strings as non-Claude-Code fingerprints.
+func sanitizeToolText(text string) string {
+	if text == "" {
+		return text
+	}
+	text = sanitizeSystemText(text)
 	text = strings.ReplaceAll(text, "OpenCode", "Claude Code")
 	text = opencodeTextRe.ReplaceAllString(text, "Claude")
 	return text
@@ -720,7 +734,7 @@ func sanitizeToolDescription(description string) string {
 	}
 	description = toolDescAbsPathRe.ReplaceAllString(description, "[path]")
 	description = toolDescWinPathRe.ReplaceAllString(description, "[path]")
-	return sanitizeOpenCodeText(description)
+	return sanitizeToolText(description)
 }
 
 func normalizeToolInputSchema(inputSchema any, cache map[string]string) {
@@ -795,7 +809,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu
 	if system, ok := req["system"]; ok {
 		switch v := system.(type) {
 		case string:
-			sanitized := sanitizeOpenCodeText(v)
+			sanitized := sanitizeSystemText(v)
 			if sanitized != v {
 				req["system"] = sanitized
 			}
@@ -812,7 +826,7 @@ func normalizeClaudeOAuthRequestBody(body []byte, modelID string, opts claudeOAu
 				if !ok || text == "" {
 					continue
 				}
-				sanitized := sanitizeOpenCodeText(text)
+				sanitized := sanitizeSystemText(text)
 				if sanitized != text {
 					block["text"] = sanitized
 				}

From 3a34746668f22bd7a96d8c29a84aeb2a08f88bef Mon Sep 17 00:00:00 2001
From: cyhhao
Date: Sat, 31 Jan 2026 02:01:51 +0800
Subject: [PATCH 063/214] refactor: stop rewriting tool descriptions; keep only
 system sentence rewrite

---
 .../internal/service/gateway_sanitize_test.go |  7 ++++---
 backend/internal/service/gateway_service.go   | 17 +++--------------
 2 files changed, 7 insertions(+), 17 deletions(-)

diff --git a/backend/internal/service/gateway_sanitize_test.go b/backend/internal/service/gateway_sanitize_test.go
index a70c1a00..8fa971ca 100644
--- a/backend/internal/service/gateway_sanitize_test.go
+++ b/backend/internal/service/gateway_sanitize_test.go
@@ -13,8 +13,9 @@ func TestSanitizeOpenCodeText_RewritesCanonicalSentence(t *testing.T) {
 	require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got)
 }
 
-func TestSanitizeToolText_RewritesOpenCodeKeywords(t *testing.T) {
+func TestSanitizeToolDescription_DoesNotRewriteKeywords(t *testing.T) {
 	in := "OpenCode and opencode are mentioned."
-	got := sanitizeToolText(in)
-	require.Equal(t, "Claude Code and Claude are mentioned.", got)
+	got := sanitizeToolDescription(in)
+	// We no longer rewrite tool descriptions; only redact obvious path leaks.
+	require.Equal(t, in, got)
 }
diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go
index 40354e48..703804a4 100644
--- a/backend/internal/service/gateway_service.go
+++ b/backend/internal/service/gateway_service.go
@@ -715,26 +715,15 @@ func sanitizeSystemText(text string) string {
 	return text
 }
 
-// sanitizeToolText is intentionally more aggressive than sanitizeSystemText because
-// tool descriptions are not user chat content, and some upstreams may flag "opencode"
-// strings as non-Claude-Code fingerprints.
-func sanitizeToolText(text string) string {
-	if text == "" {
-		return text
-	}
-	text = sanitizeSystemText(text)
-	text = strings.ReplaceAll(text, "OpenCode", "Claude Code")
-	text = opencodeTextRe.ReplaceAllString(text, "Claude")
-	return text
-}
-
 func sanitizeToolDescription(description string) string {
 	if description == "" {
 		return description
 	}
 	description = toolDescAbsPathRe.ReplaceAllString(description, "[path]")
 	description = toolDescWinPathRe.ReplaceAllString(description, "[path]")
-	return sanitizeToolText(description)
+	// Intentionally do NOT rewrite tool descriptions (OpenCode/Claude strings).
+	// Tool names/skill names may rely on exact wording, and rewriting can be misleading.
+	return description
 }
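Editorial note: the net effect of patches 062–063 is that only the canonical OpenCode identity sentence is rewritten, and only in system text; tool descriptions keep their wording and only obvious path leaks are redacted. A test-style sketch of the surviving behavior (inputs invented for illustration; the path redaction output assumes the toolDescAbsPathRe pattern above):

func TestSanitizeBehaviorAfterRefactor(t *testing.T) {
	// System text: the fixed identity sentence still becomes the Claude Code prompt.
	got := sanitizeSystemText("You are OpenCode, the best coding agent on the planet.")
	require.Equal(t, strings.TrimSpace(claudeCodeSystemPrompt), got)

	// Tool descriptions: keywords survive, absolute paths are redacted.
	desc := "OpenCode reads /home/alice/project files."
	require.Equal(t, "OpenCode reads [path] files.", sanitizeToolDescription(desc))
}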
获取或创建指纹(包含随机生成的ClientID) - fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if err != nil { log.Printf("Warning: failed to get fingerprint for account %d: %v", account.ID, err) // 失败时降级为透传原始headers @@ -3355,7 +3359,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } // 白名单透传headers - for key, values := range c.Request.Header { + for key, values := range clientHeaders { lowerKey := strings.ToLower(key) if allowedHeaders[lowerKey] { for _, v := range values { @@ -3479,20 +3483,6 @@ func requestNeedsBetaFeatures(body []byte) bool { return false } -func requestHasTools(body []byte) bool { - tools := gjson.GetBytes(body, "tools") - if !tools.Exists() { - return false - } - if tools.IsArray() { - return len(tools.Array()) > 0 - } - if tools.IsObject() { - return len(tools.Map()) > 0 - } - return false -} - func defaultAPIKeyBetaHeader(body []byte) string { modelID := gjson.GetBytes(body, "model").String() if strings.Contains(strings.ToLower(modelID), "haiku") { @@ -4804,10 +4794,15 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } } + clientHeaders := http.Header{} + if c != nil && c.Request != nil { + clientHeaders = c.Request.Header + } + // OAuth 账号:应用统一指纹和重写 userID // 如果启用了会话ID伪装,会在重写后替换 session 部分为固定值 if account.IsOAuth() && s.identityService != nil { - fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if err == nil { accountUUID := account.GetExtraString("account_uuid") if accountUUID != "" && fp.ClientID != "" { @@ -4831,7 +4826,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // 白名单透传 headers - for key, values := range c.Request.Header { + for key, values := range clientHeaders { lowerKey := strings.ToLower(key) if allowedHeaders[lowerKey] { for _, v := range values { @@ -4842,7 +4837,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:应用指纹到请求头 if account.IsOAuth() && s.identityService != nil { - fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, clientHeaders) if fp != nil { s.identityService.ApplyFingerprint(req, fp) } From f2e206700ce3754eeb56a8c6310dd69afc83152a Mon Sep 17 00:00:00 2001 From: iBenzene Date: Sat, 31 Jan 2026 00:53:39 +0800 Subject: [PATCH 065/214] feat: add support for using TLS to connect to Redis --- backend/internal/config/config.go | 3 ++ backend/internal/repository/redis.go | 12 +++++++- backend/internal/repository/redis_test.go | 12 ++++++++ backend/internal/setup/cli.go | 3 ++ backend/internal/setup/handler.go | 18 ++++++----- backend/internal/setup/setup.go | 32 ++++++++++++++------ config.yaml | 3 ++ deploy/.env.example | 1 + deploy/config.example.yaml | 3 ++ deploy/docker-compose.standalone.yml | 1 + deploy/docker-compose.yml | 1 + frontend/src/api/setup.ts | 1 + frontend/src/i18n/locales/en.ts | 4 ++- frontend/src/i18n/locales/zh.ts | 4 ++- frontend/src/views/setup/SetupWizardView.vue | 15 ++++++++- 15 files changed, 91 insertions(+), 22 deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 477cb59d..84be445b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -415,6 +415,8 
@@ type RedisConfig struct { PoolSize int `mapstructure:"pool_size"` // MinIdleConns: 最小空闲连接数,保持热连接减少冷启动延迟 MinIdleConns int `mapstructure:"min_idle_conns"` + // EnableTLS: 是否启用 TLS/SSL 连接 + EnableTLS bool `mapstructure:"enable_tls"` } func (r *RedisConfig) Address() string { @@ -762,6 +764,7 @@ func setDefaults() { viper.SetDefault("redis.write_timeout_seconds", 3) viper.SetDefault("redis.pool_size", 128) viper.SetDefault("redis.min_idle_conns", 10) + viper.SetDefault("redis.enable_tls", false) // Ops (vNext) viper.SetDefault("ops.enabled", true) diff --git a/backend/internal/repository/redis.go b/backend/internal/repository/redis.go index f3606ad9..ee6b2a59 100644 --- a/backend/internal/repository/redis.go +++ b/backend/internal/repository/redis.go @@ -1,6 +1,7 @@ package repository import ( + "crypto/tls" "time" "github.com/Wei-Shaw/sub2api/internal/config" @@ -26,7 +27,7 @@ func InitRedis(cfg *config.Config) *redis.Client { // buildRedisOptions 构建 Redis 连接选项 // 从配置文件读取连接池和超时参数,支持生产环境调优 func buildRedisOptions(cfg *config.Config) *redis.Options { - return &redis.Options{ + opts := &redis.Options{ Addr: cfg.Redis.Address(), Password: cfg.Redis.Password, DB: cfg.Redis.DB, @@ -36,4 +37,13 @@ func buildRedisOptions(cfg *config.Config) *redis.Options { PoolSize: cfg.Redis.PoolSize, // 连接池大小 MinIdleConns: cfg.Redis.MinIdleConns, // 最小空闲连接 } + + if cfg.Redis.EnableTLS { + opts.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: cfg.Redis.Host, + } + } + + return opts } diff --git a/backend/internal/repository/redis_test.go b/backend/internal/repository/redis_test.go index 756a63dc..7cb31002 100644 --- a/backend/internal/repository/redis_test.go +++ b/backend/internal/repository/redis_test.go @@ -32,4 +32,16 @@ func TestBuildRedisOptions(t *testing.T) { require.Equal(t, 4*time.Second, opts.WriteTimeout) require.Equal(t, 100, opts.PoolSize) require.Equal(t, 10, opts.MinIdleConns) + require.Nil(t, opts.TLSConfig) + + // Test case with TLS enabled + cfgTLS := &config.Config{ + Redis: config.RedisConfig{ + Host: "localhost", + EnableTLS: true, + }, + } + optsTLS := buildRedisOptions(cfgTLS) + require.NotNil(t, optsTLS.TLSConfig) + require.Equal(t, "localhost", optsTLS.TLSConfig.ServerName) } diff --git a/backend/internal/setup/cli.go b/backend/internal/setup/cli.go index 03ac3f66..2b323acf 100644 --- a/backend/internal/setup/cli.go +++ b/backend/internal/setup/cli.go @@ -149,6 +149,8 @@ func RunCLI() error { fmt.Println(" Invalid Redis DB. Must be between 0 and 15.") } + cfg.Redis.EnableTLS = promptConfirm(reader, "Enable Redis TLS?") + fmt.Println() fmt.Print("Testing Redis connection... 
") if err := TestRedisConnection(&cfg.Redis); err != nil { @@ -205,6 +207,7 @@ func RunCLI() error { fmt.Println("── Configuration Summary ──") fmt.Printf("Database: %s@%s:%d/%s\n", cfg.Database.User, cfg.Database.Host, cfg.Database.Port, cfg.Database.DBName) fmt.Printf("Redis: %s:%d\n", cfg.Redis.Host, cfg.Redis.Port) + fmt.Printf("Redis TLS: %s\n", map[bool]string{true: "enabled", false: "disabled"}[cfg.Redis.EnableTLS]) fmt.Printf("Admin: %s\n", cfg.Admin.Email) fmt.Printf("Server: :%d\n", cfg.Server.Port) fmt.Println() diff --git a/backend/internal/setup/handler.go b/backend/internal/setup/handler.go index 1c613dfd..1531c97b 100644 --- a/backend/internal/setup/handler.go +++ b/backend/internal/setup/handler.go @@ -176,10 +176,11 @@ func testDatabase(c *gin.Context) { // TestRedisRequest represents Redis test request type TestRedisRequest struct { - Host string `json:"host" binding:"required"` - Port int `json:"port" binding:"required"` - Password string `json:"password"` - DB int `json:"db"` + Host string `json:"host" binding:"required"` + Port int `json:"port" binding:"required"` + Password string `json:"password"` + DB int `json:"db"` + EnableTLS bool `json:"enable_tls"` } // testRedis tests Redis connection @@ -205,10 +206,11 @@ func testRedis(c *gin.Context) { } cfg := &RedisConfig{ - Host: req.Host, - Port: req.Port, - Password: req.Password, - DB: req.DB, + Host: req.Host, + Port: req.Port, + Password: req.Password, + DB: req.DB, + EnableTLS: req.EnableTLS, } if err := TestRedisConnection(cfg); err != nil { diff --git a/backend/internal/setup/setup.go b/backend/internal/setup/setup.go index 65118161..f81f75cf 100644 --- a/backend/internal/setup/setup.go +++ b/backend/internal/setup/setup.go @@ -3,6 +3,7 @@ package setup import ( "context" "crypto/rand" + "crypto/tls" "database/sql" "encoding/hex" "fmt" @@ -79,10 +80,11 @@ type DatabaseConfig struct { } type RedisConfig struct { - Host string `json:"host" yaml:"host"` - Port int `json:"port" yaml:"port"` - Password string `json:"password" yaml:"password"` - DB int `json:"db" yaml:"db"` + Host string `json:"host" yaml:"host"` + Port int `json:"port" yaml:"port"` + Password string `json:"password" yaml:"password"` + DB int `json:"db" yaml:"db"` + EnableTLS bool `json:"enable_tls" yaml:"enable_tls"` } type AdminConfig struct { @@ -199,11 +201,20 @@ func TestDatabaseConnection(cfg *DatabaseConfig) error { // TestRedisConnection tests the Redis connection func TestRedisConnection(cfg *RedisConfig) error { - rdb := redis.NewClient(&redis.Options{ + opts := &redis.Options{ Addr: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port), Password: cfg.Password, DB: cfg.DB, - }) + } + + if cfg.EnableTLS { + opts.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: cfg.Host, + } + } + + rdb := redis.NewClient(opts) defer func() { if err := rdb.Close(); err != nil { log.Printf("failed to close redis client: %v", err) @@ -485,10 +496,11 @@ func AutoSetupFromEnv() error { SSLMode: getEnvOrDefault("DATABASE_SSLMODE", "disable"), }, Redis: RedisConfig{ - Host: getEnvOrDefault("REDIS_HOST", "localhost"), - Port: getEnvIntOrDefault("REDIS_PORT", 6379), - Password: getEnvOrDefault("REDIS_PASSWORD", ""), - DB: getEnvIntOrDefault("REDIS_DB", 0), + Host: getEnvOrDefault("REDIS_HOST", "localhost"), + Port: getEnvIntOrDefault("REDIS_PORT", 6379), + Password: getEnvOrDefault("REDIS_PASSWORD", ""), + DB: getEnvIntOrDefault("REDIS_DB", 0), + EnableTLS: getEnvOrDefault("REDIS_ENABLE_TLS", "false") == "true", }, Admin: AdminConfig{ Email: 
getEnvOrDefault("ADMIN_EMAIL", "admin@sub2api.local"), diff --git a/config.yaml b/config.yaml index 5e7513fb..19f77221 100644 --- a/config.yaml +++ b/config.yaml @@ -322,6 +322,9 @@ redis: # Database number (0-15) # 数据库编号(0-15) db: 0 + # Enable TLS/SSL connection + # 是否启用 TLS/SSL 连接 + enable_tls: false # ============================================================================= # Ops Monitoring (Optional) diff --git a/deploy/.env.example b/deploy/.env.example index 1e9395a0..25096c3d 100644 --- a/deploy/.env.example +++ b/deploy/.env.example @@ -40,6 +40,7 @@ POSTGRES_DB=sub2api # Leave empty for no password (default for local development) REDIS_PASSWORD= REDIS_DB=0 +REDIS_ENABLE_TLS=false # ----------------------------------------------------------------------------- # Admin Account diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 98aba8f5..6f5e9744 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -376,6 +376,9 @@ redis: # Database number (0-15) # 数据库编号(0-15) db: 0 + # Enable TLS/SSL connection + # 是否启用 TLS/SSL 连接 + enable_tls: false # ============================================================================= # Ops Monitoring (Optional) diff --git a/deploy/docker-compose.standalone.yml b/deploy/docker-compose.standalone.yml index 1bf247c7..97903bc5 100644 --- a/deploy/docker-compose.standalone.yml +++ b/deploy/docker-compose.standalone.yml @@ -56,6 +56,7 @@ services: - REDIS_PORT=${REDIS_PORT:-6379} - REDIS_PASSWORD=${REDIS_PASSWORD:-} - REDIS_DB=${REDIS_DB:-0} + - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false} # ======================================================================= # Admin Account (auto-created on first run) diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index ac6008d2..033731ac 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -62,6 +62,7 @@ services: - REDIS_PORT=6379 - REDIS_PASSWORD=${REDIS_PASSWORD:-} - REDIS_DB=${REDIS_DB:-0} + - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false} # ======================================================================= # Admin Account (auto-created on first run) diff --git a/frontend/src/api/setup.ts b/frontend/src/api/setup.ts index 8b744590..1097c95b 100644 --- a/frontend/src/api/setup.ts +++ b/frontend/src/api/setup.ts @@ -31,6 +31,7 @@ export interface RedisConfig { port: number password: string db: number + enable_tls: boolean } export interface AdminConfig { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index dc93d37c..64b589df 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -69,7 +69,9 @@ export default { port: 'Port', password: 'Password (optional)', database: 'Database', - passwordPlaceholder: 'Password' + passwordPlaceholder: 'Password', + enableTls: 'Enable TLS', + enableTlsHint: 'Use TLS when connecting to Redis (public CA certs)' }, admin: { title: 'Admin Account', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 4b6a9be6..19378915 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -66,7 +66,9 @@ export default { port: '端口', password: '密码(可选)', database: '数据库', - passwordPlaceholder: '密码' + passwordPlaceholder: '密码', + enableTls: '启用 TLS', + enableTlsHint: '连接 Redis 时使用 TLS(公共 CA 证书)' }, admin: { title: '管理员账户', diff --git a/frontend/src/views/setup/SetupWizardView.vue b/frontend/src/views/setup/SetupWizardView.vue index 2be837f5..00f437ba 100644 --- 
a/frontend/src/views/setup/SetupWizardView.vue +++ b/frontend/src/views/setup/SetupWizardView.vue @@ -91,6 +91,18 @@
+          <div class="flex items-center justify-between gap-4">
+            <div>
+              <label class="block text-sm font-medium">
+                {{ t("setup.redis.enableTls") }}
+              </label>
+              <p class="mt-1 text-xs text-gray-500">
+                {{ t("setup.redis.enableTlsHint") }}
+              </p>
+            </div>
+            <input v-model="formData.redis.enable_tls" type="checkbox" />
+          </div>
+
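For reference, a minimal standalone sketch of what the backend does with the new enable_tls flag, mirroring buildRedisOptions in the diffs above. This assumes go-redis v9 (as used by the repository); buildOptions and redis.example.com are illustrative names, not part of the patch:

// Hedged sketch — not part of the patch. Assumes go-redis v9; the
// buildOptions helper and the redis.example.com endpoint are hypothetical.
package main

import (
	"context"
	"crypto/tls"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// buildOptions mirrors buildRedisOptions above: TLS is opt-in, pinned to
// TLS 1.2+, with ServerName set to the configured host so certificate
// verification runs against public CA certs for that hostname.
func buildOptions(host string, port int, password string, db int, enableTLS bool) *redis.Options {
	opts := &redis.Options{
		Addr:     fmt.Sprintf("%s:%d", host, port),
		Password: password,
		DB:       db,
	}
	if enableTLS {
		opts.TLSConfig = &tls.Config{
			MinVersion: tls.VersionTLS12,
			ServerName: host,
		}
	}
	return opts
}

func main() {
	rdb := redis.NewClient(buildOptions("redis.example.com", 6380, "", 0, true))
	defer rdb.Close()
	// PING over the TLS connection; fails fast if the handshake is rejected.
	fmt.Println(rdb.Ping(context.Background()).Err())
}

Pinning ServerName to the configured host, as the patch does, matches what the handshake would verify by default and makes the intent explicit when the address is reached through a proxy or CNAME.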
@@ -517,7 +529,8 @@ const formData = reactive({ host: 'localhost', port: 6379, password: '', - db: 0 + db: 0, + enable_tls: false }, admin: { email: '', From 35f39ca2912a8f7f269368669556fc5dcf8fd7b8 Mon Sep 17 00:00:00 2001 From: iBenzene Date: Sat, 31 Jan 2026 19:06:19 +0800 Subject: [PATCH 066/214] =?UTF-8?q?chore:=20=E4=BF=AE=E5=A4=8D=E4=BA=86=20?= =?UTF-8?q?redis.go=20=E4=B8=AD=E4=BB=A3=E7=A0=81=E9=A3=8E=E6=A0=BC?= =?UTF-8?q?=EF=BC=88golangci-lint=EF=BC=89=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/redis.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/internal/repository/redis.go b/backend/internal/repository/redis.go index ee6b2a59..2b4ee4e6 100644 --- a/backend/internal/repository/redis.go +++ b/backend/internal/repository/redis.go @@ -40,8 +40,8 @@ func buildRedisOptions(cfg *config.Config) *redis.Options { if cfg.Redis.EnableTLS { opts.TLSConfig = &tls.Config{ - MinVersion: tls.VersionTLS12, - ServerName: cfg.Redis.Host, + MinVersion: tls.VersionTLS12, + ServerName: cfg.Redis.Host, } } From bbc7b4aeed6d33e6b6f7d42991edd949c9de8833 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Sun, 1 Feb 2026 16:29:27 +0800 Subject: [PATCH 067/214] =?UTF-8?q?feat(gateway):=20Gemini=20API=20Key=20?= =?UTF-8?q?=E8=B4=A6=E6=88=B7=E8=B7=B3=E8=BF=87=E6=A8=A1=E5=9E=8B=E6=98=A0?= =?UTF-8?q?=E5=B0=84=E6=A3=80=E6=9F=A5=EF=BC=8C=E7=9B=B4=E6=8E=A5=E9=80=8F?= =?UTF-8?q?=E4=BC=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gemini API Key 账户通常代理上游服务,模型支持由上游判断, 本地不需要预先配置模型映射。 --- backend/internal/service/gateway_service.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 2e3ba93e..7a901907 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -1893,6 +1893,10 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo // Antigravity 平台使用专门的模型支持检查 return IsAntigravityModelSupported(requestedModel) } + // Gemini API Key 账户直接透传,由上游判断模型是否支持 + if account.Platform == PlatformGemini && account.Type == AccountTypeAPIKey { + return true + } // 其他平台使用账户的模型支持检查 return account.IsModelSupported(requestedModel) } From 4bfeeecb05a193719cec5d676e268f23a4ede1d0 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Mon, 2 Feb 2026 12:50:18 +0800 Subject: [PATCH 068/214] =?UTF-8?q?fix(billing):=20=E4=BF=AE=E5=A4=8D=20Ge?= =?UTF-8?q?mini=20=E6=8E=A5=E5=8F=A3=E7=BC=93=E5=AD=98=20token=20=E7=BB=9F?= =?UTF-8?q?=E8=AE=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit extractGeminiUsage 函数未提取 cachedContentTokenCount, 导致计费时缓存读取 token 始终为 0。 修复: - 提取 usageMetadata.cachedContentTokenCount - 设置 CacheReadInputTokens 字段 - InputTokens 减去缓存 token(与 response_transformer 逻辑一致) --- .../internal/service/gemini_messages_compat_service.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index aea880c2..659cdf03 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2522,9 +2522,13 @@ func extractGeminiUsage(geminiResp map[string]any) *ClaudeUsage { } prompt, _ := asInt(usageMeta["promptTokenCount"]) cand, _ := 
asInt(usageMeta["candidatesTokenCount"]) + cached, _ := asInt(usageMeta["cachedContentTokenCount"]) + // 注意:Gemini 的 promptTokenCount 包含 cachedContentTokenCount, + // 但 Claude 的 input_tokens 不包含 cache_read_input_tokens,需要减去 return &ClaudeUsage{ - InputTokens: prompt, - OutputTokens: cand, + InputTokens: prompt - cached, + OutputTokens: cand, + CacheReadInputTokens: cached, } } From bbdc8663d32ba4db75cf3255ba71b1b469ed22d3 Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 2 Feb 2026 14:57:09 +0800 Subject: [PATCH 069/214] =?UTF-8?q?feat:=20=E9=87=8D=E6=96=B0=E8=AE=BE?= =?UTF-8?q?=E8=AE=A1=E5=85=AC=E5=91=8A=E7=B3=BB=E7=BB=9F=E4=B8=BAHeader?= =?UTF-8?q?=E9=93=83=E9=93=9B=E9=80=9A=E7=9F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 AnnouncementBell 组件,支持 Modal 弹窗和 Markdown 渲染 - 移除 Dashboard 横幅和独立公告页面 - 铃铛位置在 Header 文档按钮左侧,显示未读红点 - 支持点击查看详情、标记已读、全部已读等操作 - 完善国际化,移除所有硬编码中文 - 修复 AnnouncementTargetingEditor watch 循环问题 --- frontend/package-lock.json | 7212 ----------------- frontend/package.json | 3 + frontend/pnpm-lock.yaml | 17 + .../AnnouncementTargetingEditor.vue | 32 +- .../components/common/AnnouncementBell.vue | 626 ++ frontend/src/components/icons/Icon.vue | 3 + frontend/src/components/layout/AppHeader.vue | 6 +- frontend/src/components/layout/AppSidebar.vue | 2 - frontend/src/i18n/locales/en.ts | 11 +- frontend/src/i18n/locales/zh.ts | 11 +- frontend/src/router/index.ts | 12 - frontend/src/utils/format.ts | 19 + frontend/src/views/user/AnnouncementsView.vue | 140 - 13 files changed, 719 insertions(+), 7375 deletions(-) delete mode 100644 frontend/package-lock.json create mode 100644 frontend/src/components/common/AnnouncementBell.vue delete mode 100644 frontend/src/views/user/AnnouncementsView.vue diff --git a/frontend/package-lock.json b/frontend/package-lock.json deleted file mode 100644 index 5c43a6a8..00000000 --- a/frontend/package-lock.json +++ /dev/null @@ -1,7212 +0,0 @@ -{ - "name": "sub2api-frontend", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "sub2api-frontend", - "version": "1.0.0", - "dependencies": { - "@lobehub/icons": "^4.0.2", - "@vueuse/core": "^10.7.0", - "axios": "^1.6.2", - "chart.js": "^4.4.1", - "driver.js": "^1.4.0", - "file-saver": "^2.0.5", - "pinia": "^2.1.7", - "qrcode": "^1.5.4", - "vue": "^3.4.0", - "vue-chartjs": "^5.3.0", - "vue-i18n": "^9.14.5", - "vue-router": "^4.2.5", - "xlsx": "^0.18.5" - }, - "devDependencies": { - "@types/file-saver": "^2.0.7", - "@types/mdx": "^2.0.13", - "@types/node": "^20.10.5", - "@types/qrcode": "^1.5.6", - "@typescript-eslint/eslint-plugin": "^7.18.0", - "@typescript-eslint/parser": "^7.18.0", - "@vitejs/plugin-vue": "^5.2.3", - "@vitest/coverage-v8": "^2.1.9", - "@vue/test-utils": "^2.4.6", - "autoprefixer": "^10.4.16", - "eslint": "^8.57.0", - "eslint-plugin-vue": "^9.25.0", - "jsdom": "^24.1.3", - "postcss": "^8.4.32", - "tailwindcss": "^3.4.0", - "typescript": "~5.6.0", - "vite": "^5.0.10", - "vite-plugin-checker": "^0.9.1", - "vitest": "^2.1.9", - "vue-tsc": "^2.2.0" - } - }, - "node_modules/@alloc/quick-lru": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", - "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@ampproject/remapping": { - 
"version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@ant-design/cssinjs": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-2.0.2.tgz", - "integrity": "sha512-7KDVIigtqlamOLtJ0hbjECX/sDGDaJXsM/KHala8I/1E4lpl9RAO585kbVvh/k1rIrFAV6JeGkXmdWyYj9XvuA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.11.1", - "@emotion/hash": "^0.8.0", - "@emotion/unitless": "^0.7.5", - "@rc-component/util": "^1.4.0", - "clsx": "^2.1.1", - "csstype": "^3.1.3", - "stylis": "^4.3.4" - }, - "peerDependencies": { - "react": ">=16.0.0", - "react-dom": ">=16.0.0" - } - }, - "node_modules/@asamuzakjp/css-color": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", - "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@csstools/css-calc": "^2.1.3", - "@csstools/css-color-parser": "^3.0.9", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "lru-cache": "^10.4.3" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", - "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", - "license": "MIT", - "dependencies": { - "@babel/helper-validator-identifier": "^7.27.1", - "js-tokens": "^4.0.0", - "picocolors": "^1.1.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", - "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@babel/types": "^7.28.5", - "@jridgewell/gen-mapping": "^0.3.12", - "@jridgewell/trace-mapping": "^0.3.28", - "jsesc": "^3.0.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-globals": { - "version": "7.28.0", - "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", - "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", - "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.27.1", - "@babel/types": "^7.27.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-string-parser": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", - "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", - "license": 
"MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", - "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", - "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.28.5" - }, - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/runtime": { - "version": "7.28.4", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", - "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.27.2", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", - "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "@babel/parser": "^7.27.2", - "@babel/types": "^7.27.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", - "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "@babel/generator": "^7.28.5", - "@babel/helper-globals": "^7.28.0", - "@babel/parser": "^7.28.5", - "@babel/template": "^7.27.2", - "@babel/types": "^7.28.5", - "debug": "^4.3.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/types": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", - "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", - "license": "MIT", - "dependencies": { - "@babel/helper-string-parser": "^7.27.1", - "@babel/helper-validator-identifier": "^7.28.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@bcoe/v8-coverage": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", - "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@csstools/color-helpers": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", - "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - } - }, - "node_modules/@csstools/css-calc": { - "version": "2.1.4", - "resolved": 
"https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", - "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-color-parser": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", - "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/color-helpers": "^5.1.0", - "@csstools/css-calc": "^2.1.4" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-parser-algorithms": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", - "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-tokenizer": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", - "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/@emotion/babel-plugin": { - "version": "11.13.5", - "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz", - "integrity": "sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/runtime": "^7.18.3", - "@emotion/hash": "^0.9.2", - "@emotion/memoize": "^0.9.0", - "@emotion/serialize": "^1.3.3", - "babel-plugin-macros": "^3.1.0", - "convert-source-map": "^1.5.0", - "escape-string-regexp": "^4.0.0", - "find-root": "^1.1.0", - "source-map": "^0.5.7", - "stylis": "4.2.0" - } - }, - "node_modules/@emotion/babel-plugin/node_modules/@emotion/hash": { - "version": "0.9.2", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", - "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", - "license": "MIT" - }, - "node_modules/@emotion/babel-plugin/node_modules/stylis": { - "version": "4.2.0", - "resolved": 
"https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", - "license": "MIT" - }, - "node_modules/@emotion/cache": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz", - "integrity": "sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==", - "license": "MIT", - "dependencies": { - "@emotion/memoize": "^0.9.0", - "@emotion/sheet": "^1.4.0", - "@emotion/utils": "^1.4.2", - "@emotion/weak-memoize": "^0.4.0", - "stylis": "4.2.0" - } - }, - "node_modules/@emotion/cache/node_modules/stylis": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", - "license": "MIT" - }, - "node_modules/@emotion/css": { - "version": "11.13.5", - "resolved": "https://registry.npmjs.org/@emotion/css/-/css-11.13.5.tgz", - "integrity": "sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==", - "license": "MIT", - "dependencies": { - "@emotion/babel-plugin": "^11.13.5", - "@emotion/cache": "^11.13.5", - "@emotion/serialize": "^1.3.3", - "@emotion/sheet": "^1.4.0", - "@emotion/utils": "^1.4.2" - } - }, - "node_modules/@emotion/hash": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", - "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", - "license": "MIT" - }, - "node_modules/@emotion/memoize": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz", - "integrity": "sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==", - "license": "MIT" - }, - "node_modules/@emotion/react": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz", - "integrity": "sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.18.3", - "@emotion/babel-plugin": "^11.13.5", - "@emotion/cache": "^11.14.0", - "@emotion/serialize": "^1.3.3", - "@emotion/use-insertion-effect-with-fallbacks": "^1.2.0", - "@emotion/utils": "^1.4.2", - "@emotion/weak-memoize": "^0.4.0", - "hoist-non-react-statics": "^3.3.1" - }, - "peerDependencies": { - "react": ">=16.8.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@emotion/serialize": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz", - "integrity": "sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==", - "license": "MIT", - "dependencies": { - "@emotion/hash": "^0.9.2", - "@emotion/memoize": "^0.9.0", - "@emotion/unitless": "^0.10.0", - "@emotion/utils": "^1.4.2", - "csstype": "^3.0.2" - } - }, - "node_modules/@emotion/serialize/node_modules/@emotion/hash": { - "version": "0.9.2", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", - "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", - "license": "MIT" - }, - "node_modules/@emotion/serialize/node_modules/@emotion/unitless": { - "version": "0.10.0", - "resolved": 
"https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz", - "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==", - "license": "MIT" - }, - "node_modules/@emotion/sheet": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz", - "integrity": "sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==", - "license": "MIT" - }, - "node_modules/@emotion/unitless": { - "version": "0.7.5", - "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", - "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", - "license": "MIT" - }, - "node_modules/@emotion/use-insertion-effect-with-fallbacks": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz", - "integrity": "sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==", - "license": "MIT", - "peerDependencies": { - "react": ">=16.8.0" - } - }, - "node_modules/@emotion/utils": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz", - "integrity": "sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==", - "license": "MIT" - }, - "node_modules/@emotion/weak-memoize": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz", - "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", - "license": "MIT" - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", - "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", - "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", - "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", - "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", - "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", - "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", - "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", - "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", - "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", - "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", - "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", - "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", - "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", - "cpu": [ - "mips64el" - ], - 
"dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", - "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", - "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", - "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", - "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", - "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", - "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", - "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", - "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", - "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", - "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", - "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "node_modules/@eslint-community/regexpp": { - "version": "4.12.2", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", - "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.0.0 || ^14.0.0 || >=16.0.0" - } - }, - "node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", - "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@eslint/eslintrc/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@eslint/js": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", - "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - } - }, - 
"node_modules/@humanwhocodes/config-array": { - "version": "0.13.0", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", - "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", - "deprecated": "Use @eslint/config-array instead", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@humanwhocodes/object-schema": "^2.0.3", - "debug": "^4.3.1", - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=10.10.0" - } - }, - "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.22" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", - "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", - "deprecated": "Use @eslint/object-schema instead", - "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/@intlify/core-base": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.14.5.tgz", - "integrity": "sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==", - "license": "MIT", - "dependencies": { - "@intlify/message-compiler": "9.14.5", - "@intlify/shared": "9.14.5" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - } - }, - "node_modules/@intlify/message-compiler": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.14.5.tgz", - "integrity": "sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==", - "license": "MIT", - "dependencies": { - "@intlify/shared": "9.14.5", - "source-map-js": "^1.0.2" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - } - }, - "node_modules/@intlify/shared": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.14.5.tgz", - "integrity": "sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==", - "license": "MIT", - "engines": { - "node": ">= 16" - }, - "funding": { - "url": 
"https://github.com/sponsors/kazupon" - } - }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.13", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", - "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.0", - "@jridgewell/trace-mapping": "^0.3.24" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", - "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", - "license": "MIT" - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.31", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", - "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "node_modules/@kurkle/color": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz", - "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", - 
"license": "MIT" - }, - "node_modules/@lobehub/icons": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@lobehub/icons/-/icons-4.0.2.tgz", - "integrity": "sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==", - "license": "MIT", - "workspaces": [ - "packages/*" - ], - "dependencies": { - "antd-style": "^4.1.0", - "lucide-react": "^0.469.0", - "polished": "^4.3.1" - }, - "peerDependencies": { - "@lobehub/ui": "^4.3.3", - "antd": "^6.1.1", - "react": "^19.0.0", - "react-dom": "^19.0.0" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@one-ini/wasm": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", - "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, - "node_modules/@rc-component/util": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@rc-component/util/-/util-1.7.0.tgz", - "integrity": "sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==", - "license": "MIT", - "dependencies": { - "is-mobile": "^5.0.0", - "react-is": "^18.2.0" - }, - "peerDependencies": { - "react": ">=18.0.0", - "react-dom": ">=18.0.0" - } - }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", - "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", - "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", 
- "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", - "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", - "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", - "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", - "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", - "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", - "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", - "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", - "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", - "integrity": 
"sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", - "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", - "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", - "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", - "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", - "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", - "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", - "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", - "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-openbsd-x64": { - 
"version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", - "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ] - }, - "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", - "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", - "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", - "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", - "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", - "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@types/estree": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/file-saver": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@types/file-saver/-/file-saver-2.0.7.tgz", - "integrity": "sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/mdx": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", - "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "20.19.27", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.27.tgz", - "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", - "dev": true, - "license": "MIT", - 
"dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/@types/parse-json": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", - "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", - "license": "MIT" - }, - "node_modules/@types/qrcode": { - "version": "1.5.6", - "resolved": "https://registry.npmmirror.com/@types/qrcode/-/qrcode-1.5.6.tgz", - "integrity": "sha512-te7NQcV2BOvdj2b1hCAHzAoMNuj65kNBMz0KBaxM6c3VGBOhU0dURQKOtH8CFNI/dsKkwlv32p26qYQTWoB5bw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/web-bluetooth": { - "version": "0.0.20", - "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", - "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==", - "license": "MIT" - }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz", - "integrity": "sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/type-utils": "7.18.0", - "@typescript-eslint/utils": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "graphemer": "^1.4.0", - "ignore": "^5.3.1", - "natural-compare": "^1.4.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^7.0.0", - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz", - "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/typescript-estree": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz", - "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/type-utils": { - "version": "7.18.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz", - "integrity": "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/typescript-estree": "7.18.0", - "@typescript-eslint/utils": "7.18.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/types": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz", - "integrity": "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz", - "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.18.0.tgz", - "integrity": "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/typescript-estree": "7.18.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - } - }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz", - "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@ungap/structured-clone": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", - "integrity": 
"sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", - "dev": true, - "license": "ISC" - }, - "node_modules/@vitejs/plugin-vue": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-5.2.4.tgz", - "integrity": "sha512-7Yx/SXSOcQq5HiiV3orevHUFn+pmMB4cgbEkDYgnkUWb0WfeQ/wa2yFv6D5ICiCQOVpjA7vYDXrC7AGO8yjDHA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "peerDependencies": { - "vite": "^5.0.0 || ^6.0.0", - "vue": "^3.2.25" - } - }, - "node_modules/@vitest/coverage-v8": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-2.1.9.tgz", - "integrity": "sha512-Z2cOr0ksM00MpEfyVE8KXIYPEcBFxdbLSs56L8PO0QQMxt/6bDj45uQfxoc96v05KW3clk7vvgP0qfDit9DmfQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@ampproject/remapping": "^2.3.0", - "@bcoe/v8-coverage": "^0.2.3", - "debug": "^4.3.7", - "istanbul-lib-coverage": "^3.2.2", - "istanbul-lib-report": "^3.0.1", - "istanbul-lib-source-maps": "^5.0.6", - "istanbul-reports": "^3.1.7", - "magic-string": "^0.30.12", - "magicast": "^0.3.5", - "std-env": "^3.8.0", - "test-exclude": "^7.0.1", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@vitest/browser": "2.1.9", - "vitest": "2.1.9" - }, - "peerDependenciesMeta": { - "@vitest/browser": { - "optional": true - } - } - }, - "node_modules/@vitest/expect": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", - "integrity": "sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/mocker": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.9.tgz", - "integrity": "sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.12" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "node_modules/@vitest/mocker/node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/@vitest/pretty-format": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz", - "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/runner": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.9.tgz", - "integrity": 
"sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "2.1.9", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/snapshot": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.9.tgz", - "integrity": "sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "magic-string": "^0.30.12", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/spy": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.9.tgz", - "integrity": "sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^3.0.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/utils": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.9.tgz", - "integrity": "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "loupe": "^3.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@volar/language-core": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.15.tgz", - "integrity": "sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/source-map": "2.4.15" - } - }, - "node_modules/@volar/source-map": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-2.4.15.tgz", - "integrity": "sha512-CPbMWlUN6hVZJYGcU/GSoHu4EnCHiLaXI9n8c9la6RaI9W5JHX+NqG+GSQcB0JdC2FIBLdZJwGsfKyBB71VlTg==", - "dev": true, - "license": "MIT" - }, - "node_modules/@volar/typescript": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-2.4.15.tgz", - "integrity": "sha512-2aZ8i0cqPGjXb4BhkMsPYDkkuc2ZQ6yOpqwAuNwUoncELqoy5fRgOQtLR9gB0g902iS0NAkvpIzs27geVyVdPg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/language-core": "2.4.15", - "path-browserify": "^1.0.1", - "vscode-uri": "^3.0.8" - } - }, - "node_modules/@vue/compiler-core": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.26.tgz", - "integrity": "sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@vue/shared": "3.5.26", - "entities": "^7.0.0", - "estree-walker": "^2.0.2", - "source-map-js": "^1.2.1" - } - }, - "node_modules/@vue/compiler-dom": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.26.tgz", - "integrity": "sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==", - "license": "MIT", - "dependencies": { - "@vue/compiler-core": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/compiler-sfc": { - "version": 
"3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.26.tgz", - "integrity": "sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@vue/compiler-core": "3.5.26", - "@vue/compiler-dom": "3.5.26", - "@vue/compiler-ssr": "3.5.26", - "@vue/shared": "3.5.26", - "estree-walker": "^2.0.2", - "magic-string": "^0.30.21", - "postcss": "^8.5.6", - "source-map-js": "^1.2.1" - } - }, - "node_modules/@vue/compiler-ssr": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.26.tgz", - "integrity": "sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==", - "license": "MIT", - "dependencies": { - "@vue/compiler-dom": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/compiler-vue2": { - "version": "2.7.16", - "resolved": "https://registry.npmjs.org/@vue/compiler-vue2/-/compiler-vue2-2.7.16.tgz", - "integrity": "sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==", - "dev": true, - "license": "MIT", - "dependencies": { - "de-indent": "^1.0.2", - "he": "^1.2.0" - } - }, - "node_modules/@vue/devtools-api": { - "version": "6.6.4", - "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", - "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", - "license": "MIT" - }, - "node_modules/@vue/language-core": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/@vue/language-core/-/language-core-2.2.12.tgz", - "integrity": "sha512-IsGljWbKGU1MZpBPN+BvPAdr55YPkj2nB/TBNGNC32Vy2qLG25DYu/NBN2vNtZqdRbTRjaoYrahLrToim2NanA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/language-core": "2.4.15", - "@vue/compiler-dom": "^3.5.0", - "@vue/compiler-vue2": "^2.7.16", - "@vue/shared": "^3.5.0", - "alien-signals": "^1.0.3", - "minimatch": "^9.0.3", - "muggle-string": "^0.4.1", - "path-browserify": "^1.0.1" - }, - "peerDependencies": { - "typescript": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@vue/reactivity": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.26.tgz", - "integrity": "sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==", - "license": "MIT", - "dependencies": { - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/runtime-core": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.26.tgz", - "integrity": "sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==", - "license": "MIT", - "dependencies": { - "@vue/reactivity": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/runtime-dom": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.26.tgz", - "integrity": "sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==", - "license": "MIT", - "dependencies": { - "@vue/reactivity": "3.5.26", - "@vue/runtime-core": "3.5.26", - "@vue/shared": "3.5.26", - "csstype": "^3.2.3" - } - }, - "node_modules/@vue/server-renderer": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.26.tgz", - "integrity": 
"sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==", - "license": "MIT", - "dependencies": { - "@vue/compiler-ssr": "3.5.26", - "@vue/shared": "3.5.26" - }, - "peerDependencies": { - "vue": "3.5.26" - } - }, - "node_modules/@vue/shared": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.26.tgz", - "integrity": "sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==", - "license": "MIT" - }, - "node_modules/@vue/test-utils": { - "version": "2.4.6", - "resolved": "https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.4.6.tgz", - "integrity": "sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==", - "dev": true, - "license": "MIT", - "dependencies": { - "js-beautify": "^1.14.9", - "vue-component-type-helpers": "^2.0.0" - } - }, - "node_modules/@vueuse/core": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-10.11.1.tgz", - "integrity": "sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==", - "license": "MIT", - "dependencies": { - "@types/web-bluetooth": "^0.0.20", - "@vueuse/metadata": "10.11.1", - "@vueuse/shared": "10.11.1", - "vue-demi": ">=0.14.8" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@vueuse/metadata": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-10.11.1.tgz", - "integrity": "sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@vueuse/shared": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-10.11.1.tgz", - "integrity": "sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==", - "license": "MIT", - "dependencies": { - "vue-demi": ">=0.14.8" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/abbrev": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", - "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/adler-32": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/adler-32/-/adler-32-1.3.1.tgz", - "integrity": "sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/agent-base": { - "version": "7.1.4", - "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", - "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/alien-signals": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-1.0.13.tgz", - "integrity": "sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==", - "dev": true, - "license": "MIT" - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/antd-style": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/antd-style/-/antd-style-4.1.0.tgz", - "integrity": "sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==", - "license": "MIT", - "dependencies": { - "@ant-design/cssinjs": "^2.0.0", - "@babel/runtime": "^7.24.1", - "@emotion/cache": "^11.11.0", - "@emotion/css": "^11.11.2", - "@emotion/react": "^11.11.4", - "@emotion/serialize": "^1.1.3", - "@emotion/utils": "^1.2.1", - "use-merge-value": "^1.2.0" - }, - "peerDependencies": { - "antd": ">=6.0.0", - "react": ">=18" - } - }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "dev": true, - "license": "MIT" - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "dev": true, - "license": "ISC", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "dev": true, - "license": "MIT" - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0" - }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/assertion-error": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "license": "MIT" - }, - "node_modules/autoprefixer": { - "version": "10.4.23", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", - "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "browserslist": "^4.28.1", - "caniuse-lite": "^1.0.30001760", - "fraction.js": "^5.3.4", - "picocolors": "^1.1.1", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/axios": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", - "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", - "license": "MIT", - "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.4", - "proxy-from-env": "^1.1.0" - } - }, - "node_modules/babel-plugin-macros": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", - "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5", - "cosmiconfig": "^7.0.0", - "resolve": "^1.19.0" - }, - "engines": { - "node": ">=10", - "npm": ">=6" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/baseline-browser-mapping": { - "version": "2.9.14", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz", - "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "baseline-browser-mapping": "dist/cli.js" - } - }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", - "dev": true, - "license": "ISC" - }, - "node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.28.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", - "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "baseline-browser-mapping": "^2.9.0", - "caniuse-lite": "^1.0.30001759", - "electron-to-chromium": "^1.5.263", - "node-releases": "^2.0.27", - "update-browserslist-db": "^1.2.0" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "license": "MIT", - "engines": { - "node": ">=6" - } 
-    },
-    "node_modules/camelcase-css": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
-      "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
-      "dev": true,
-      "license": "MIT",
-      "engines": {
-        "node": ">= 6"
-      }
-    },
-    "node_modules/caniuse-lite": {
-      "version": "1.0.30001763",
-      "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001763.tgz",
-      "integrity": "sha512-mh/dGtq56uN98LlNX9qdbKnzINhX0QzhiWBFEkFfsFO4QyCvL8YegrJAazCwXIeqkIob8BlZPGM3xdnY+sgmvQ==",
-      "dev": true,
-      "funding": [
-        {
-          "type": "opencollective",
-          "url": "https://opencollective.com/browserslist"
-        },
-        {
-          "type": "tidelift",
-          "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
-        },
-        {
-          "type": "github",
-          "url": "https://github.com/sponsors/ai"
-        }
-      ],
-      "license": "CC-BY-4.0"
-    },
-    "node_modules/cfb": {
-      "version": "1.2.2",
-      "resolved": "https://registry.npmjs.org/cfb/-/cfb-1.2.2.tgz",
-      "integrity": "sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==",
-      "license": "Apache-2.0",
-      "dependencies": {
-        "adler-32": "~1.3.0",
-        "crc-32": "~1.2.0"
-      },
-      "engines": {
-        "node": ">=0.8"
-      }
-    },
-    "node_modules/chai": {
-      "version": "5.3.3",
-      "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz",
-      "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "assertion-error": "^2.0.1",
-        "check-error": "^2.1.1",
-        "deep-eql": "^5.0.1",
-        "loupe": "^3.1.0",
-        "pathval": "^2.0.0"
-      },
-      "engines": {
-        "node": ">=18"
-      }
-    },
-    "node_modules/chalk": {
-      "version": "4.1.2",
-      "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
-      "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "ansi-styles": "^4.1.0",
-        "supports-color": "^7.1.0"
-      },
-      "engines": {
-        "node": ">=10"
-      },
-      "funding": {
-        "url": "https://github.com/chalk/chalk?sponsor=1"
-      }
-    },
-    "node_modules/chart.js": {
-      "version": "4.5.1",
-      "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz",
-      "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==",
-      "license": "MIT",
-      "dependencies": {
-        "@kurkle/color": "^0.3.0"
-      },
-      "engines": {
-        "pnpm": ">=8"
-      }
-    },
-    "node_modules/check-error": {
-      "version": "2.1.3",
-      "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz",
-      "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==",
-      "dev": true,
-      "license": "MIT",
-      "engines": {
-        "node": ">= 16"
-      }
-    },
-    "node_modules/chokidar": {
-      "version": "3.6.0",
-      "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
-      "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
-      "dev": true,
-      "license": "MIT",
-      "dependencies": {
-        "anymatch": "~3.1.2",
-        "braces": "~3.0.2",
-        "glob-parent": "~5.1.2",
-        "is-binary-path": "~2.1.0",
-        "is-glob": "~4.0.1",
-        "normalize-path": "~3.0.0",
-        "readdirp": "~3.6.0"
-      },
-      "engines": {
-        "node": ">= 8.10.0"
-      },
-      "funding": {
-        "url": "https://paulmillr.com/funding/"
-      },
-      "optionalDependencies": {
-        "fsevents": "~2.3.2"
-      }
-    },
"node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/cliui": { - "version": "6.0.0", - "resolved": "https://registry.npmmirror.com/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^6.2.0" - } - }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/clsx": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", - "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/codepage": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/codepage/-/codepage-1.15.0.tgz", - "integrity": "sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - 
"node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true, - "license": "MIT" - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/convert-source-map": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", - "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", - "license": "MIT" - }, - "node_modules/cosmiconfig": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", - "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/crc-32": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", - "integrity": "sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==", - "license": "Apache-2.0", - "bin": { - "crc32": "bin/crc32.njs" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "dev": true, - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssstyle": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", - "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@asamuzakjp/css-color": "^3.2.0", - "rrweb-cssom": "^0.8.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/cssstyle/node_modules/rrweb-cssom": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", - "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", - "dev": true, - "license": "MIT" - }, - 
"node_modules/csstype": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", - "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", - "license": "MIT" - }, - "node_modules/data-urls": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", - "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^14.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/de-indent": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", - "integrity": "sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==", - "dev": true, - "license": "MIT" - }, - "node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmmirror.com/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/decimal.js": { - "version": "10.6.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", - "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", - "dev": true, - "license": "MIT" - }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/dijkstrajs": { - "version": "1.0.3", - "resolved": "https://registry.npmmirror.com/dijkstrajs/-/dijkstrajs-1.0.3.tgz", - "integrity": "sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA==", - "license": "MIT" - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - 
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dlv": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", - "dev": true, - "license": "MIT" - }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/driver.js": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/driver.js/-/driver.js-1.4.0.tgz", - "integrity": "sha512-Gm64jm6PmcU+si21sQhBrTAM1JvUrR0QhNmjkprNLxohOBzul9+pNHXgQaT9lW84gwg9GMLB3NZGuGolsz5uew==", - "license": "MIT" - }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "dev": true, - "license": "MIT" - }, - "node_modules/editorconfig": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-1.0.4.tgz", - "integrity": "sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@one-ini/wasm": "0.1.1", - "commander": "^10.0.0", - "minimatch": "9.0.1", - "semver": "^7.5.3" - }, - "bin": { - "editorconfig": "bin/editorconfig" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/editorconfig/node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - } - }, - "node_modules/editorconfig/node_modules/minimatch": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.1.tgz", - "integrity": "sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/electron-to-chromium": { - "version": "1.5.267", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", - "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", - "dev": true, - "license": "ISC" - }, - "node_modules/emoji-regex": { - "version": 
"9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true, - "license": "MIT" - }, - "node_modules/entities": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.0.tgz", - "integrity": "sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", - "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", - "license": "MIT", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", - "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", - "dev": true, - "license": "MIT" - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-set-tostringtag": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.6", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/esbuild": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", - "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", 
- "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", - "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - "@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", - "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", - "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.1", - "@humanwhocodes/config-array": "^0.13.0", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "@ungap/structured-clone": "^1.2.0", - "ajv": "^6.12.4", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.2", - "eslint-visitor-keys": "^3.4.3", - "espree": "^9.6.1", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "graphemer": "^1.4.0", - "ignore": "^5.2.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-yaml": "^4.1.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "bin": { - "eslint": "bin/eslint.js" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-plugin-vue": { - "version": "9.33.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.33.0.tgz", - "integrity": "sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "globals": "^13.24.0", - "natural-compare": "^1.4.0", - "nth-check": "^2.1.1", - "postcss-selector-parser": "^6.0.15", - "semver": "^7.6.3", - "vue-eslint-parser": "^9.4.3", - "xml-name-validator": "^4.0.0" - }, - "engines": { - "node": "^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" - } - }, - 
"node_modules/eslint-scope": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", - "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/eslint/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/espree": { - "version": "9.6.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", - "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "acorn": "^8.9.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/esquery": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", - "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": 
"sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "license": "MIT" - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expect-type": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", - "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-glob": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", - "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.8" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true, - "license": "MIT" - }, - "node_modules/fastq": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", - "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", - "dev": true, - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": 
"sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", - "dev": true, - "license": "MIT", - "dependencies": { - "flat-cache": "^3.0.4" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/file-saver": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", - "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==", - "license": "MIT" - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-root": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", - "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", - "license": "MIT" - }, - "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "license": "MIT", - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/flat-cache": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", - "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", - "dev": true, - "license": "MIT", - "dependencies": { - "flatted": "^3.2.9", - "keyv": "^4.5.3", - "rimraf": "^3.0.2" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", - "dev": true, - "license": "ISC" - }, - "node_modules/follow-redirects": { - "version": "1.15.11", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", - "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/foreground-child": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", - "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", - "dev": true, - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.6", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/form-data": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", - "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", - "license": 
"MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "es-set-tostringtag": "^2.1.0", - "hasown": "^2.0.2", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/frac": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/frac/-/frac-1.1.2.tgz", - "integrity": "sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/fraction.js": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", - "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "*" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/rawify" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true, - "license": "ISC" - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmmirror.com/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/glob/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "type-fest": "^0.20.2" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/graphemer": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", - "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", - "dev": true, - "license": "MIT" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "license": "MIT", - "engines": { - 
"node": ">=8" - } - }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "dev": true, - "license": "MIT", - "bin": { - "he": "bin/he" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "license": "BSD-3-Clause", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, - "node_modules/html-encoding-sniffer": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", - "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-encoding": "^3.1.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true, - "license": "MIT" - }, - "node_modules/http-proxy-agent": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", - "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.0", - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "dev": true, - 
"license": "MIT", - "dependencies": { - "agent-base": "^7.1.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/import-fresh": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", - "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", - "license": "MIT", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "dev": true, - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true, - "license": "ISC" - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "license": "MIT" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "license": "MIT", - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-core-module": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", - "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-mobile": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-5.0.0.tgz", - "integrity": "sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==", - "license": "MIT" - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-path-inside": { - "version": 
"3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-potential-custom-element-name": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", - "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC" - }, - "node_modules/istanbul-lib-coverage": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", - "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-report": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", - "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^4.0.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-lib-source-maps": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", - "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.23", - "debug": "^4.1.1", - "istanbul-lib-coverage": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-reports": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", - "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "html-escaper": "^2.0.0", - "istanbul-lib-report": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/jiti": { - "version": "1.21.7", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", - "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", - "dev": true, - "license": "MIT", - "bin": { - "jiti": "bin/jiti.js" - } - }, - "node_modules/js-beautify": { - "version": "1.15.4", - "resolved": 
"https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.4.tgz", - "integrity": "sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==", - "dev": true, - "license": "MIT", - "dependencies": { - "config-chain": "^1.1.13", - "editorconfig": "^1.0.4", - "glob": "^10.4.2", - "js-cookie": "^3.0.5", - "nopt": "^7.2.1" - }, - "bin": { - "css-beautify": "js/bin/css-beautify.js", - "html-beautify": "js/bin/html-beautify.js", - "js-beautify": "js/bin/js-beautify.js" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/js-beautify/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/js-cookie": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", - "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "license": "MIT" - }, - "node_modules/js-yaml": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", - "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsdom": { - "version": "24.1.3", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-24.1.3.tgz", - "integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "cssstyle": "^4.0.1", - "data-urls": "^5.0.0", - "decimal.js": "^10.4.3", - "form-data": "^4.0.0", - "html-encoding-sniffer": "^4.0.0", - "http-proxy-agent": "^7.0.2", - "https-proxy-agent": "^7.0.5", - "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.12", - "parse5": "^7.1.2", - "rrweb-cssom": "^0.7.1", - "saxes": "^6.0.0", - "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.4", - "w3c-xmlserializer": "^5.0.0", - "webidl-conversions": "^7.0.0", - "whatwg-encoding": "^3.1.1", - "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^14.0.0", - "ws": "^8.18.0", - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "canvas": "^2.11.2" - }, - "peerDependenciesMeta": { - "canvas": { - "optional": true - } - } - }, - "node_modules/jsdom/node_modules/xml-name-validator": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", - "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18" - } - }, - 
"node_modules/jsesc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", - "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "license": "MIT" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, - "license": "MIT" - }, - "node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dev": true, - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/lilconfig": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", - "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "license": "MIT" - }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": 
"https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/loupe": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", - "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/lucide-react": { - "version": "0.469.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.469.0.tgz", - "integrity": "sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==", - "license": "ISC", - "peerDependencies": { - "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" - } - }, - "node_modules/magic-string": { - "version": "0.30.21", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", - "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.5" - } - }, - "node_modules/magicast": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", - "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.25.4", - "@babel/types": "^7.25.4", - "source-map-js": "^1.2.0" - } - }, - "node_modules/make-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", - "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^7.5.3" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": 
"^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "license": "MIT" - }, - "node_modules/muggle-string": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", - "integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "node_modules/nanoid": { - "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true, - "license": "MIT" - }, - "node_modules/node-releases": { - "version": "2.0.27", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", - "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", - "dev": true, - "license": "MIT" - }, - "node_modules/nopt": { - "version": "7.2.1", - "resolved": 
"https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", - "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", - "dev": true, - "license": "ISC", - "dependencies": { - "abbrev": "^2.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/npm-run-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", - "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^4.0.0", - "unicorn-magic": "^0.3.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/nwsapi": { - "version": "2.2.23", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz", - "integrity": "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/optionator": { - "version": "0.9.4", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", - "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", - "dev": true, - "license": "MIT", - "dependencies": { - "deep-is": 
"^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.5" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "dev": true, - "license": "BlueOak-1.0.0" - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse5": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", - "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "entities": "^6.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5/node_modules/entities": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", - "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/path-browserify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", - 
"integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", - "dev": true, - "license": "MIT" - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "license": "MIT" - }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/pathe": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", - "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathval": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", - "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pinia": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.3.1.tgz", - "integrity": "sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==", - "license": "MIT", - "dependencies": { - "@vue/devtools-api": "^6.6.3", - "vue-demi": "^0.14.10" - }, - "funding": { - "url": "https://github.com/sponsors/posva" - }, - "peerDependencies": { - "typescript": ">=4.4.4", - "vue": "^2.7.0 || ^3.5.11" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/pirates": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", - "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/pngjs": { - "version": "5.0.0", - "resolved": "https://registry.npmmirror.com/pngjs/-/pngjs-5.0.0.tgz", - "integrity": "sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==", - "license": "MIT", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/polished": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", - "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.17.8" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/postcss": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", - "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-import": { - "version": "15.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", - "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-js": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", - "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "camelcase-css": "^2.0.1" - }, - "engines": { - "node": "^12 || ^14 || >= 16" - }, - "peerDependencies": { - "postcss": 
"^8.4.21" - } - }, - "node_modules/postcss-load-config": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", - "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "lilconfig": "^3.1.1" - }, - "engines": { - "node": ">= 18" - }, - "peerDependencies": { - "jiti": ">=1.21.0", - "postcss": ">=8.0.9", - "tsx": "^4.8.1", - "yaml": "^2.4.2" - }, - "peerDependenciesMeta": { - "jiti": { - "optional": true - }, - "postcss": { - "optional": true - }, - "tsx": { - "optional": true - }, - "yaml": { - "optional": true - } - } - }, - "node_modules/postcss-nested": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", - "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.1.1" - }, - "engines": { - "node": ">=12.0" - }, - "peerDependencies": { - "postcss": "^8.2.14" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", - "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", - "dev": true, - "license": "ISC" - }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "license": "MIT" - }, - "node_modules/psl": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", - "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.3.1" - }, - "funding": { - "url": "https://github.com/sponsors/lupomontero" - } - }, - 
"node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/qrcode": { - "version": "1.5.4", - "resolved": "https://registry.npmmirror.com/qrcode/-/qrcode-1.5.4.tgz", - "integrity": "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg==", - "license": "MIT", - "dependencies": { - "dijkstrajs": "^1.0.1", - "pngjs": "^5.0.0", - "yargs": "^15.3.1" - }, - "bin": { - "qrcode": "bin/qrcode" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "license": "MIT" - }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", - "dev": true, - "license": "MIT", - "dependencies": { - "pify": "^2.3.0" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, - "license": "MIT", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmmirror.com/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmmirror.com/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "license": "ISC" - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/resolve": { - "version": "1.22.11", - "resolved": 
"https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", - "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.16.1", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/reusify": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", - "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", - "dev": true, - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/rollup": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", - "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.8" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.55.1", - "@rollup/rollup-android-arm64": "4.55.1", - "@rollup/rollup-darwin-arm64": "4.55.1", - "@rollup/rollup-darwin-x64": "4.55.1", - "@rollup/rollup-freebsd-arm64": "4.55.1", - "@rollup/rollup-freebsd-x64": "4.55.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", - "@rollup/rollup-linux-arm-musleabihf": "4.55.1", - "@rollup/rollup-linux-arm64-gnu": "4.55.1", - "@rollup/rollup-linux-arm64-musl": "4.55.1", - "@rollup/rollup-linux-loong64-gnu": "4.55.1", - "@rollup/rollup-linux-loong64-musl": "4.55.1", - "@rollup/rollup-linux-ppc64-gnu": "4.55.1", - "@rollup/rollup-linux-ppc64-musl": "4.55.1", - "@rollup/rollup-linux-riscv64-gnu": "4.55.1", - "@rollup/rollup-linux-riscv64-musl": "4.55.1", - "@rollup/rollup-linux-s390x-gnu": "4.55.1", - "@rollup/rollup-linux-x64-gnu": "4.55.1", - "@rollup/rollup-linux-x64-musl": "4.55.1", - "@rollup/rollup-openbsd-x64": "4.55.1", - "@rollup/rollup-openharmony-arm64": "4.55.1", - "@rollup/rollup-win32-arm64-msvc": "4.55.1", - "@rollup/rollup-win32-ia32-msvc": "4.55.1", - "@rollup/rollup-win32-x64-gnu": "4.55.1", - "@rollup/rollup-win32-x64-msvc": "4.55.1", - "fsevents": "~2.3.2" - } - }, - "node_modules/rrweb-cssom": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz", - "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==", - "dev": true, - "license": "MIT" - }, - 
"node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true, - "license": "MIT" - }, - "node_modules/saxes": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", - "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", - "dev": true, - "license": "ISC", - "dependencies": { - "xmlchars": "^2.2.0" - }, - "engines": { - "node": ">=v12.22.7" - } - }, - "node_modules/semver": { - "version": "7.7.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", - "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmmirror.com/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", - "license": "ISC" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true, - "license": "ISC" - }, - "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssf": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/ssf/-/ssf-0.11.2.tgz", - "integrity": "sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==", - "license": "Apache-2.0", - "dependencies": { - "frac": "~1.1.2" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true, - "license": "MIT" - }, - "node_modules/std-env": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", - "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - 
"license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/stylis": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", - "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", - "license": "MIT" - }, - "node_modules/sucrase": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", - "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.2", - "commander": "^4.0.0", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - "tinyglobby": "^0.2.11", - "ts-interface-checker": "^0.1.9" - }, - "bin": { - "sucrase": "bin/sucrase", - "sucrase-node": "bin/sucrase-node" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/symbol-tree": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", - "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", - "dev": true, - "license": "MIT" - }, - "node_modules/tailwindcss": { - "version": "3.4.19", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", - "integrity": 
"sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@alloc/quick-lru": "^5.2.0", - "arg": "^5.0.2", - "chokidar": "^3.6.0", - "didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.3.2", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "jiti": "^1.21.7", - "lilconfig": "^3.1.3", - "micromatch": "^4.0.8", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.1.1", - "postcss": "^8.4.47", - "postcss-import": "^15.1.0", - "postcss-js": "^4.0.1", - "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", - "postcss-nested": "^6.2.0", - "postcss-selector-parser": "^6.1.2", - "resolve": "^1.22.8", - "sucrase": "^3.35.0" - }, - "bin": { - "tailwind": "lib/cli.js", - "tailwindcss": "lib/cli.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/test-exclude": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", - "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", - "dev": true, - "license": "ISC", - "dependencies": { - "@istanbuljs/schema": "^0.1.2", - "glob": "^10.4.1", - "minimatch": "^9.0.4" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/test-exclude/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true, - "license": "MIT" - }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "dev": true, - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "dev": true, - "license": "MIT", - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tiny-invariant": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", - "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinybench": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyexec": { - "version": "0.3.2", - "resolved": 
"https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", - "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyglobby": { - "version": "0.2.15", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", - "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "fdir": "^6.5.0", - "picomatch": "^4.0.3" - }, - "engines": { - "node": ">=12.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/SuperchupuDev" - } - }, - "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/tinypool": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", - "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/tinyrainbow": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", - "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", - "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/tr46": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", - "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.3.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/ts-api-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", - "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", - "dev": true, - "license": "MIT", - 
"engines": { - "node": ">=16" - }, - "peerDependencies": { - "typescript": ">=4.2.0" - } - }, - "node_modules/ts-interface-checker": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/typescript": { - "version": "5.6.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", - "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/unicorn-magic": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", - "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", - "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "escalade": "^3.2.0", - "picocolors": "^1.1.1" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": 
"sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/use-merge-value": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-merge-value/-/use-merge-value-1.2.0.tgz", - "integrity": "sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==", - "license": "MIT", - "peerDependencies": { - "react": ">= 16.x" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true, - "license": "MIT" - }, - "node_modules/vite": { - "version": "5.4.21", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", - "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.21.3", - "postcss": "^8.4.43", - "rollup": "^4.20.0" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^18.0.0 || >=20.0.0", - "less": "*", - "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", - "terser": "^5.4.0" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - } - } - }, - "node_modules/vite-node": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.9.tgz", - "integrity": "sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.3.7", - "es-module-lexer": "^1.5.4", - "pathe": "^1.1.2", - "vite": "^5.0.0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/vite-plugin-checker": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/vite-plugin-checker/-/vite-plugin-checker-0.9.3.tgz", - "integrity": "sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "chokidar": "^4.0.3", - "npm-run-path": "^6.0.0", - "picocolors": "^1.1.1", - "picomatch": "^4.0.2", - "strip-ansi": "^7.1.0", - "tiny-invariant": "^1.3.3", - "tinyglobby": "^0.2.13", - "vscode-uri": "^3.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "peerDependencies": 
{ - "@biomejs/biome": ">=1.7", - "eslint": ">=7", - "meow": "^13.2.0", - "optionator": "^0.9.4", - "stylelint": ">=16", - "typescript": "*", - "vite": ">=2.0.0", - "vls": "*", - "vti": "*", - "vue-tsc": "~2.2.10" - }, - "peerDependenciesMeta": { - "@biomejs/biome": { - "optional": true - }, - "eslint": { - "optional": true - }, - "meow": { - "optional": true - }, - "optionator": { - "optional": true - }, - "stylelint": { - "optional": true - }, - "typescript": { - "optional": true - }, - "vls": { - "optional": true - }, - "vti": { - "optional": true - }, - "vue-tsc": { - "optional": true - } - } - }, - "node_modules/vite-plugin-checker/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/vite-plugin-checker/node_modules/chokidar": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", - "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", - "dev": true, - "license": "MIT", - "dependencies": { - "readdirp": "^4.0.1" - }, - "engines": { - "node": ">= 14.16.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/vite-plugin-checker/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/vite-plugin-checker/node_modules/readdirp": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", - "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.18.0" - }, - "funding": { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/vite-plugin-checker/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/vitest": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.9.tgz", - "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/expect": "2.1.9", - "@vitest/mocker": "2.1.9", - "@vitest/pretty-format": "^2.1.9", - "@vitest/runner": "2.1.9", - "@vitest/snapshot": "2.1.9", - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "debug": "^4.3.7", - "expect-type": "^1.1.0", - "magic-string": "^0.30.12", - "pathe": "^1.1.2", - "std-env": "^3.8.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.1", - "tinypool": 
"^1.0.1", - "tinyrainbow": "^1.2.0", - "vite": "^5.0.0", - "vite-node": "2.1.9", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.1.9", - "@vitest/ui": "2.1.9", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "node_modules/vscode-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", - "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/vue": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.26.tgz", - "integrity": "sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==", - "license": "MIT", - "dependencies": { - "@vue/compiler-dom": "3.5.26", - "@vue/compiler-sfc": "3.5.26", - "@vue/runtime-dom": "3.5.26", - "@vue/server-renderer": "3.5.26", - "@vue/shared": "3.5.26" - }, - "peerDependencies": { - "typescript": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/vue-chartjs": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/vue-chartjs/-/vue-chartjs-5.3.3.tgz", - "integrity": "sha512-jqxtL8KZ6YJ5NTv6XzrzLS7osyegOi28UGNZW0h9OkDL7Sh1396ht4Dorh04aKrl2LiSalQ84WtqiG0RIJb0tA==", - "license": "MIT", - "peerDependencies": { - "chart.js": "^4.1.1", - "vue": "^3.0.0-0 || ^2.7.0" - } - }, - "node_modules/vue-component-type-helpers": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/vue-component-type-helpers/-/vue-component-type-helpers-2.2.12.tgz", - "integrity": "sha512-YbGqHZ5/eW4SnkPNR44mKVc6ZKQoRs/Rux1sxC6rdwXb4qpbOSYfDr9DsTHolOTGmIKgM9j141mZbBeg05R1pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/vue-demi": { - "version": "0.14.10", - "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.10.tgz", - "integrity": "sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==", - "hasInstallScript": true, - "license": "MIT", - "bin": { - "vue-demi-fix": "bin/vue-demi-fix.js", - "vue-demi-switch": "bin/vue-demi-switch.js" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - }, - "peerDependencies": { - "@vue/composition-api": "^1.0.0-rc.1", - "vue": "^3.0.0-0 || ^2.6.0" - }, - "peerDependenciesMeta": { - "@vue/composition-api": { - "optional": true - } - } - }, - "node_modules/vue-eslint-parser": { - "version": "9.4.3", - "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", - "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.3.4", - "eslint-scope": "^7.1.1", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.3.1", - "esquery": "^1.4.0", - "lodash": "^4.17.21", - "semver": "^7.3.6" - }, - "engines": { - "node": "^14.17.0 || >=16.0.0" - }, - "funding": { - "url": 
"https://github.com/sponsors/mysticatea" - }, - "peerDependencies": { - "eslint": ">=6.0.0" - } - }, - "node_modules/vue-i18n": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-9.14.5.tgz", - "integrity": "sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==", - "deprecated": "v9 and v10 no longer supported. please migrate to v11. about maintenance status, see https://vue-i18n.intlify.dev/guide/maintenance.html", - "license": "MIT", - "dependencies": { - "@intlify/core-base": "9.14.5", - "@intlify/shared": "9.14.5", - "@vue/devtools-api": "^6.5.0" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - }, - "peerDependencies": { - "vue": "^3.0.0" - } - }, - "node_modules/vue-router": { - "version": "4.6.4", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", - "integrity": "sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==", - "license": "MIT", - "dependencies": { - "@vue/devtools-api": "^6.6.4" - }, - "funding": { - "url": "https://github.com/sponsors/posva" - }, - "peerDependencies": { - "vue": "^3.5.0" - } - }, - "node_modules/vue-tsc": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-2.2.12.tgz", - "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/typescript": "2.4.15", - "@vue/language-core": "2.2.12" - }, - "bin": { - "vue-tsc": "bin/vue-tsc.js" - }, - "peerDependencies": { - "typescript": ">=5.0.0" - } - }, - "node_modules/w3c-xmlserializer": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", - "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", - "dev": true, - "license": "MIT", - "dependencies": { - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/w3c-xmlserializer/node_modules/xml-name-validator": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", - "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/webidl-conversions": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", - "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=12" - } - }, - "node_modules/whatwg-encoding": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", - "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", - "deprecated": "Use @exodus/bytes instead for a more spec-conformant and faster implementation", - "dev": true, - "license": "MIT", - "dependencies": { - "iconv-lite": "0.6.3" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/whatwg-mimetype": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", - "integrity": 
"sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/whatwg-url": { - "version": "14.2.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", - "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", - "dev": true, - "license": "MIT", - "dependencies": { - "tr46": "^5.1.0", - "webidl-conversions": "^7.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/which-module": { - "version": "2.0.1", - "resolved": "https://registry.npmmirror.com/which-module/-/which-module-2.0.1.tgz", - "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==", - "license": "ISC" - }, - "node_modules/why-is-node-running": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", - "dev": true, - "license": "MIT", - "dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wmf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wmf/-/wmf-1.0.2.tgz", - "integrity": "sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/word": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/word/-/word-0.3.0.tgz", - "integrity": "sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/word-wrap": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", - "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": 
{ - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ws": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", - "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xlsx": { - "version": "0.18.5", - "resolved": "https://registry.npmjs.org/xlsx/-/xlsx-0.18.5.tgz", - "integrity": "sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==", - "license": "Apache-2.0", - "dependencies": { - "adler-32": "~1.3.0", - "cfb": "~1.2.1", - "codepage": "~1.15.0", - "crc-32": "~1.2.1", - "ssf": "~0.11.2", - "wmf": "~1.0.1", - "word": "~0.3.0" - }, - "bin": { - "xlsx": "bin/xlsx.njs" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/xml-name-validator": { - "version": 
"4.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", - "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12" - } - }, - "node_modules/xmlchars": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", - "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", - "dev": true, - "license": "MIT" - }, - "node_modules/y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmmirror.com/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", - "license": "ISC" - }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "license": "ISC", - "engines": { - "node": ">= 6" - } - }, - "node_modules/yargs": { - "version": "15.4.1", - "resolved": "https://registry.npmmirror.com/yargs/-/yargs-15.4.1.tgz", - "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", - "license": "MIT", - "dependencies": { - "cliui": "^6.0.0", - "decamelize": "^1.2.0", - "find-up": "^4.1.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^4.2.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^18.1.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "license": "ISC", - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/yargs/node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmmirror.com/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "license": "MIT", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmmirror.com/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "license": "MIT", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmmirror.com/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - 
"node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/yargs/node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmmirror.com/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "license": "MIT", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - } - } -} diff --git a/frontend/package.json b/frontend/package.json index 8e1fdb4b..38b92708 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -19,8 +19,10 @@ "@vueuse/core": "^10.7.0", "axios": "^1.6.2", "chart.js": "^4.4.1", + "dompurify": "^3.3.1", "driver.js": "^1.4.0", "file-saver": "^2.0.5", + "marked": "^17.0.1", "pinia": "^2.1.7", "qrcode": "^1.5.4", "vue": "^3.4.0", @@ -30,6 +32,7 @@ "xlsx": "^0.18.5" }, "devDependencies": { + "@types/dompurify": "^3.0.5", "@types/file-saver": "^2.0.7", "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index df82dcdb..7dc73325 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -20,12 +20,18 @@ importers: chart.js: specifier: ^4.4.1 version: 4.5.1 + dompurify: + specifier: ^3.3.1 + version: 3.3.1 driver.js: specifier: ^1.4.0 version: 1.4.0 file-saver: specifier: ^2.0.5 version: 2.0.5 + marked: + specifier: ^17.0.1 + version: 17.0.1 pinia: specifier: ^2.1.7 version: 2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3)) @@ -48,6 +54,9 @@ importers: specifier: ^0.18.5 version: 0.18.5 devDependencies: + '@types/dompurify': + specifier: ^3.0.5 + version: 3.2.0 '@types/file-saver': specifier: ^2.0.7 version: 2.0.7 @@ -1460,6 +1469,10 @@ packages: '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/dompurify@3.2.0': + resolution: {integrity: sha512-Fgg31wv9QbLDA0SpTOXO3MaxySc4DKGLi8sna4/Utjo4r3ZRPdCt4UQee8BWr+Q5z21yifghREPJGYaEOEIACg==} + deprecated: This is a stub types definition. dompurify provides its own type definitions, so you do not need this installed. 
+ '@types/estree-jsx@1.0.5': resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} @@ -5901,6 +5914,10 @@ snapshots: dependencies: '@types/ms': 2.1.0 + '@types/dompurify@3.2.0': + dependencies: + dompurify: 3.3.1 + '@types/estree-jsx@1.0.5': dependencies: '@types/estree': 1.0.8 diff --git a/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue b/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue index bd90af42..355f5399 100644 --- a/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue +++ b/frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue @@ -323,6 +323,7 @@ function ensureSelectionPath(groupIndex: number, condIndex: number) { if (!subscriptionSelections[groupIndex][condIndex]) subscriptionSelections[groupIndex][condIndex] = [] } +// Sync from modelValue to subscriptionSelections (one-way: model -> local state) watch( () => props.modelValue, (v) => { @@ -333,20 +334,34 @@ watch( const c = allOf[ci] if (c?.type === 'subscription') { ensureSelectionPath(gi, ci) - subscriptionSelections[gi][ci] = (c.group_ids ?? []).slice() + // Only update if different to avoid triggering unnecessary updates + const newIds = (c.group_ids ?? []).slice() + const currentIds = subscriptionSelections[gi]?.[ci] ?? [] + if (JSON.stringify(newIds.sort()) !== JSON.stringify(currentIds.sort())) { + subscriptionSelections[gi][ci] = newIds + } } } } }, - { immediate: true, deep: true } + { immediate: true } ) +// Sync from subscriptionSelections to modelValue (one-way: local state -> model) +// Use a debounced approach to avoid infinite loops +let syncTimeout: ReturnType | null = null watch( () => subscriptionSelections, () => { - // sync back to targeting - updateTargeting((draft) => { - const groups = draft.any_of ?? [] + // Debounce the sync to avoid rapid fire updates + if (syncTimeout) clearTimeout(syncTimeout) + + syncTimeout = setTimeout(() => { + // Build the new targeting state + const newTargeting: TargetingDraft = JSON.parse(JSON.stringify(props.modelValue ?? { any_of: [] })) + if (!newTargeting.any_of) newTargeting.any_of = [] + + const groups = newTargeting.any_of ?? [] for (let gi = 0; gi < groups.length; gi++) { const allOf = groups[gi]?.all_of ?? 
[] for (let ci = 0; ci < allOf.length; ci++) { @@ -358,7 +373,12 @@ watch( } } } - }) + + // Only emit if there's an actual change (deep comparison) + if (JSON.stringify(props.modelValue) !== JSON.stringify(newTargeting)) { + emit('update:modelValue', newTargeting) + } + }, 0) }, { deep: true } ) diff --git a/frontend/src/components/common/AnnouncementBell.vue b/frontend/src/components/common/AnnouncementBell.vue new file mode 100644 index 00000000..9d00f9be --- /dev/null +++ b/frontend/src/components/common/AnnouncementBell.vue @@ -0,0 +1,626 @@ + + + + + + + diff --git a/frontend/src/components/icons/Icon.vue b/frontend/src/components/icons/Icon.vue index c8ab8aed..1f055111 100644 --- a/frontend/src/components/icons/Icon.vue +++ b/frontend/src/components/icons/Icon.vue @@ -107,6 +107,9 @@ const icons = { database: 'M20.25 6.375c0 2.278-3.694 4.125-8.25 4.125S3.75 8.653 3.75 6.375m16.5 0c0-2.278-3.694-4.125-8.25-4.125S3.75 4.097 3.75 6.375m16.5 0v11.25c0 2.278-3.694 4.125-8.25 4.125s-8.25-1.847-8.25-4.125V6.375m16.5 0v3.75m-16.5-3.75v3.75m16.5 0v3.75C20.25 16.153 16.556 18 12 18s-8.25-1.847-8.25-4.125v-3.75m16.5 0c0 2.278-3.694 4.125-8.25 4.125s-8.25-1.847-8.25-4.125', cube: 'M20 7l-8-4-8 4m16 0l-8 4m8-4v10l-8 4m0-10L4 7m8 4v10M4 7v10l8 4', + // Notification + bell: 'M15 17h5l-1.405-1.405A2.032 2.032 0 0118 14.158V11a6.002 6.002 0 00-4-5.659V5a2 2 0 10-4 0v.341C7.67 6.165 6 8.388 6 11v3.159c0 .538-.214 1.055-.595 1.436L4 17h5m6 0v1a3 3 0 11-6 0v-1m6 0H9', + // Misc bolt: 'M13 10V3L4 14h7v7l9-11h-7z', sparkles: 'M9.813 15.904L9 18.75l-.813-2.846a4.5 4.5 0 00-3.09-3.09L2.25 12l2.846-.813a4.5 4.5 0 003.09-3.09L9 5.25l.813 2.846a4.5 4.5 0 003.09 3.09L15.75 12l-2.846.813a4.5 4.5 0 00-3.09 3.09zM18.259 8.715L18 9.75l-.259-1.035a3.375 3.375 0 00-2.455-2.456L14.25 6l1.036-.259a3.375 3.375 0 002.455-2.456L18 2.25l.259 1.035a3.375 3.375 0 002.456 2.456L21.75 6l-1.035.259a3.375 3.375 0 00-2.456 2.456z', diff --git a/frontend/src/components/layout/AppHeader.vue b/frontend/src/components/layout/AppHeader.vue index 9d2b40fb..6b5849c0 100644 --- a/frontend/src/components/layout/AppHeader.vue +++ b/frontend/src/components/layout/AppHeader.vue @@ -21,8 +21,11 @@
[AppHeader.vue template hunk truncated in extraction: it adds the new AnnouncementBell component to the header. The file header of the next hunk, which removes the standalone /announcements entries from the navigation item lists, was lost as well:]
+ + + { const items = [ { path: '/dashboard', label: t('nav.dashboard'), icon: DashboardIcon }, - { path: '/announcements', label: t('nav.announcements'), icon: BellIcon }, { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, @@ -456,7 +455,6 @@ const userNavItems = computed(() => { // Personal navigation items (for admin's "My Account" section, without Dashboard) const personalNavItems = computed(() => { const items = [ - { path: '/announcements', label: t('nav.announcements'), icon: BellIcon }, { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 61f487a8..bb7defd8 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -3139,13 +3139,22 @@ export default { description: 'View system announcements', unreadOnly: 'Show unread only', markRead: 'Mark as read', + markAllRead: 'Mark all as read', + viewAll: 'View all announcements', + markedAsRead: 'Marked as read', + allMarkedAsRead: 'All announcements marked as read', + newCount: '{count} new announcement | {count} new announcements', readAt: 'Read at', read: 'Read', unread: 'Unread', startsAt: 'Starts at', endsAt: 'Ends at', empty: 'No announcements', - emptyUnread: 'No unread announcements' + emptyUnread: 'No unread announcements', + total: 'announcements', + emptyDescription: 'There are no system announcements at this time', + readStatus: 'You have read this announcement', + markReadHint: 'Click "Mark as read" to mark this announcement' }, // User Subscriptions Page diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index bf7806b5..2e6230b2 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -3288,13 +3288,22 @@ export default { description: '查看系统公告', unreadOnly: '仅显示未读', markRead: '标记已读', + markAllRead: '全部已读', + viewAll: '查看全部公告', + markedAsRead: '已标记为已读', + allMarkedAsRead: '所有公告已标记为已读', + newCount: '有 {count} 条新公告', readAt: '已读时间', read: '已读', unread: '未读', startsAt: '开始时间', endsAt: '结束时间', empty: '暂无公告', - emptyUnread: '暂无未读公告' + emptyUnread: '暂无未读公告', + total: '条公告', + emptyDescription: '暂时没有任何系统公告', + readStatus: '您已阅读此公告', + markReadHint: '点击"已读"标记此公告' }, // User Subscriptions Page diff --git a/frontend/src/router/index.ts b/frontend/src/router/index.ts index 4e2c1147..4bb46cee 100644 --- a/frontend/src/router/index.ts +++ b/frontend/src/router/index.ts @@ -187,18 +187,6 @@ const routes: RouteRecordRaw[] = [ descriptionKey: 'purchase.description' } }, - { - path: '/announcements', - name: 'Announcements', - component: () => import('@/views/user/AnnouncementsView.vue'), - meta: { - requiresAuth: true, - requiresAdmin: false, - title: 'Announcements', - titleKey: 'announcements.title', - descriptionKey: 'announcements.description' - } - }, // ==================== Admin Routes ==================== { diff --git a/frontend/src/utils/format.ts b/frontend/src/utils/format.ts index 78e45354..e6535c2e 100644 --- a/frontend/src/utils/format.ts +++ b/frontend/src/utils/format.ts @@ -261,3 +261,22 @@ export function formatCountdownWithSuffix(targetDate: string | Date | null | und if 
(!countdown) return null return i18n.global.t('common.time.countdown.withSuffix', { time: countdown }) } + +/** + * 格式化为相对时间 + 具体时间组合 + * @param date 日期字符串或 Date 对象 + * @returns 组合时间字符串,如 "5 天前 · 2026-01-27 15:25" + */ +export function formatRelativeWithDateTime(date: string | Date | null | undefined): string { + if (!date) return '' + + const relativeTime = formatRelativeTime(date) + const dateTime = formatDateTime(date) + + // 如果是 "从未" 或空字符串,只返回相对时间 + if (!dateTime || relativeTime === i18n.global.t('common.time.never')) { + return relativeTime + } + + return `${relativeTime} · ${dateTime}` +} diff --git a/frontend/src/views/user/AnnouncementsView.vue b/frontend/src/views/user/AnnouncementsView.vue deleted file mode 100644 index 99ea253e..00000000 --- a/frontend/src/views/user/AnnouncementsView.vue +++ /dev/null @@ -1,140 +0,0 @@ - - - From 5cda979209715e52dc02d459e9c7ded53a1a96ca Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 2 Feb 2026 16:17:07 +0800 Subject: [PATCH 070/214] =?UTF-8?q?feat(deploy):=20=E4=BC=98=E5=8C=96=20Do?= =?UTF-8?q?cker=20=E9=83=A8=E7=BD=B2=E4=BD=93=E9=AA=8C=EF=BC=8C=E6=96=B0?= =?UTF-8?q?=E5=A2=9E=E4=B8=80=E9=94=AE=E9=83=A8=E7=BD=B2=E8=84=9A=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 新增功能 - 新增 docker-compose.local.yml:使用本地目录存储数据,便于迁移和备份 - 新增 docker-deploy.sh:一键部署脚本,自动生成安全密钥(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD) - 新增 deploy/.gitignore:忽略运行时数据目录 ## 优化改进 - docker-compose.local.yml 包含 PGDATA 环境变量修复,解决 PostgreSQL 18 Alpine 数据丢失问题 - 脚本自动设置 .env 文件权限为 600,增强安全性 - 脚本显示生成的凭证,方便用户记录 ## 文档更新 - 更新 README.md(英文版):新增"快速开始"章节,添加部署版本对比表 - 更新 README_CN.md(中文版):同步英文版更新 - 更新 deploy/README.md:详细说明两种部署方式和迁移方法 ## 使用方式 一键部署: ```bash curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash docker-compose -f docker-compose.local.yml up -d ``` 轻松迁移: ```bash tar czf sub2api-complete.tar.gz deploy/ # 传输到新服务器后直接解压启动即可 ``` --- README.md | 128 +++++++++++++----- README_CN.md | 128 +++++++++++++----- deploy/.gitignore | 19 +++ deploy/README.md | 147 ++++++++++++++++++++- deploy/docker-compose.local.yml | 222 ++++++++++++++++++++++++++++++++ deploy/docker-deploy.sh | 171 ++++++++++++++++++++++++ 6 files changed, 750 insertions(+), 65 deletions(-) create mode 100644 deploy/.gitignore create mode 100644 deploy/docker-compose.local.yml create mode 100644 deploy/docker-deploy.sh diff --git a/README.md b/README.md index e8e9c5a5..14656332 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install --- -### Method 2: Docker Compose +### Method 2: Docker Compose (Recommended) Deploy with Docker Compose, including PostgreSQL and Redis containers. @@ -137,87 +137,157 @@ Deploy with Docker Compose, including PostgreSQL and Redis containers. 
- Docker 20.10+ - Docker Compose v2+ -#### Installation Steps +#### Quick Start (One-Click Deployment) + +Use the automated deployment script for easy setup: + +```bash +# Create deployment directory +mkdir -p sub2api-deploy && cd sub2api-deploy + +# Download and run deployment preparation script +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# Start services +docker-compose -f docker-compose.local.yml up -d + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api +``` + +**What the script does:** +- Downloads `docker-compose.local.yml` and `.env.example` +- Generates secure credentials (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +- Creates `.env` file with auto-generated secrets +- Creates data directories (uses local directories for easy backup/migration) +- Displays generated credentials for your reference + +#### Manual Deployment + +If you prefer manual setup: ```bash # 1. Clone the repository git clone https://github.com/Wei-Shaw/sub2api.git -cd sub2api +cd sub2api/deploy -# 2. Enter the deploy directory -cd deploy - -# 3. Copy environment configuration +# 2. Copy environment configuration cp .env.example .env -# 4. Edit configuration (set your passwords) +# 3. Edit configuration (generate secure passwords) nano .env ``` **Required configuration in `.env`:** ```bash -# PostgreSQL password (REQUIRED - change this!) +# PostgreSQL password (REQUIRED) POSTGRES_PASSWORD=your_secure_password_here +# JWT Secret (RECOMMENDED - keeps users logged in after restart) +JWT_SECRET=your_jwt_secret_here + +# TOTP Encryption Key (RECOMMENDED - preserves 2FA after restart) +TOTP_ENCRYPTION_KEY=your_totp_key_here + # Optional: Admin account ADMIN_EMAIL=admin@example.com ADMIN_PASSWORD=your_admin_password # Optional: Custom port SERVER_PORT=8080 +``` -# Optional: Security configuration -# Enable URL allowlist validation (false to skip allowlist checks, only basic format validation) -SECURITY_URL_ALLOWLIST_ENABLED=false +**Generate secure secrets:** +```bash +# Generate JWT_SECRET +openssl rand -hex 32 -# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) -# ⚠️ WARNING: Enabling this allows HTTP (plaintext) URLs which can expose API keys -# Only recommended for: -# - Development/testing environments -# - Internal networks with trusted endpoints -# - When using local test servers (http://localhost) -# PRODUCTION: Keep this false or use HTTPS URLs only -SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false +# Generate TOTP_ENCRYPTION_KEY +openssl rand -hex 32 -# Allow private IP addresses for upstream/pricing/CRS (for internal deployments) -SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +# Generate POSTGRES_PASSWORD +openssl rand -hex 32 ``` ```bash +# 4. Create data directories (for local version) +mkdir -p data postgres_data redis_data + # 5. Start all services +# Option A: Local directory version (recommended - easy migration) +docker-compose -f docker-compose.local.yml up -d + +# Option B: Named volumes version (simple setup) docker-compose up -d # 6. Check status -docker-compose ps +docker-compose -f docker-compose.local.yml ps # 7. 
View logs -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api ``` +#### Deployment Versions + +| Version | Data Storage | Migration | Best For | +|---------|-------------|-----------|----------| +| **docker-compose.local.yml** | Local directories | ✅ Easy (tar entire directory) | Production, frequent backups | +| **docker-compose.yml** | Named volumes | ⚠️ Requires docker commands | Simple setup | + +**Recommendation:** Use `docker-compose.local.yml` (deployed by script) for easier data management. + #### Access Open `http://YOUR_SERVER_IP:8080` in your browser. +If admin password was auto-generated, find it in logs: +```bash +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" +``` + #### Upgrade ```bash # Pull latest image and recreate container -docker-compose pull -docker-compose up -d +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d +``` + +#### Easy Migration (Local Directory Version) + +When using `docker-compose.local.yml`, migrate to a new server easily: + +```bash +# On source server +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz sub2api-deploy/ + +# Transfer to new server +scp sub2api-complete.tar.gz user@new-server:/path/ + +# On new server +tar xzf sub2api-complete.tar.gz +cd sub2api-deploy/ +docker-compose -f docker-compose.local.yml up -d ``` #### Useful Commands ```bash # Stop all services -docker-compose down +docker-compose -f docker-compose.local.yml down # Restart -docker-compose restart +docker-compose -f docker-compose.local.yml restart # View all logs -docker-compose logs -f +docker-compose -f docker-compose.local.yml logs -f + +# Remove all data (caution!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ ``` --- diff --git a/README_CN.md b/README_CN.md index 41d399d5..e609f25d 100644 --- a/README_CN.md +++ b/README_CN.md @@ -135,7 +135,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install --- -### 方式二:Docker Compose +### 方式二:Docker Compose(推荐) 使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。 @@ -144,87 +144,157 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install - Docker 20.10+ - Docker Compose v2+ -#### 安装步骤 +#### 快速开始(一键部署) + +使用自动化部署脚本快速搭建: + +```bash +# 创建部署目录 +mkdir -p sub2api-deploy && cd sub2api-deploy + +# 下载并运行部署准备脚本 +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# 启动服务 +docker-compose -f docker-compose.local.yml up -d + +# 查看日志 +docker-compose -f docker-compose.local.yml logs -f sub2api +``` + +**脚本功能:** +- 下载 `docker-compose.local.yml` 和 `.env.example` +- 自动生成安全凭证(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD) +- 创建 `.env` 文件并填充自动生成的密钥 +- 创建数据目录(使用本地目录,便于备份和迁移) +- 显示生成的凭证供你记录 + +#### 手动部署 + +如果你希望手动配置: ```bash # 1. 克隆仓库 git clone https://github.com/Wei-Shaw/sub2api.git -cd sub2api +cd sub2api/deploy -# 2. 进入 deploy 目录 -cd deploy - -# 3. 复制环境配置文件 +# 2. 复制环境配置文件 cp .env.example .env -# 4. 编辑配置(设置密码等) +# 3. 编辑配置(生成安全密码) nano .env ``` **`.env` 必须配置项:** ```bash -# PostgreSQL 密码(必须修改!) 
+# PostgreSQL 密码(必需) POSTGRES_PASSWORD=your_secure_password_here +# JWT 密钥(推荐 - 重启后保持用户登录状态) +JWT_SECRET=your_jwt_secret_here + +# TOTP 加密密钥(推荐 - 重启后保留双因素认证) +TOTP_ENCRYPTION_KEY=your_totp_key_here + # 可选:管理员账号 ADMIN_EMAIL=admin@example.com ADMIN_PASSWORD=your_admin_password # 可选:自定义端口 SERVER_PORT=8080 +``` -# 可选:安全配置 -# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验) -SECURITY_URL_ALLOWLIST_ENABLED=false +**生成安全密钥:** +```bash +# 生成 JWT_SECRET +openssl rand -hex 32 -# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://) -# ⚠️ 警告:允许 HTTP 会暴露 API 密钥(明文传输) -# 仅建议在以下场景使用: -# - 开发/测试环境 -# - 内部可信网络 -# - 本地测试服务器(http://localhost) -# 生产环境:保持 false 或仅使用 HTTPS URL -SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false +# 生成 TOTP_ENCRYPTION_KEY +openssl rand -hex 32 -# 是否允许私有 IP 地址用于上游/定价/CRS(内网部署时使用) -SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +# 生成 POSTGRES_PASSWORD +openssl rand -hex 32 ``` ```bash +# 4. 创建数据目录(本地版) +mkdir -p data postgres_data redis_data + # 5. 启动所有服务 +# 选项 A:本地目录版(推荐 - 易于迁移) +docker-compose -f docker-compose.local.yml up -d + +# 选项 B:命名卷版(简单设置) docker-compose up -d # 6. 查看状态 -docker-compose ps +docker-compose -f docker-compose.local.yml ps # 7. 查看日志 -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api ``` +#### 部署版本对比 + +| 版本 | 数据存储 | 迁移便利性 | 适用场景 | +|------|---------|-----------|---------| +| **docker-compose.local.yml** | 本地目录 | ✅ 简单(打包整个目录) | 生产环境、频繁备份 | +| **docker-compose.yml** | 命名卷 | ⚠️ 需要 docker 命令 | 简单设置 | + +**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。 + #### 访问 在浏览器中打开 `http://你的服务器IP:8080` +如果管理员密码是自动生成的,在日志中查找: +```bash +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" +``` + #### 升级 ```bash # 拉取最新镜像并重建容器 -docker-compose pull -docker-compose up -d +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d +``` + +#### 轻松迁移(本地目录版) + +使用 `docker-compose.local.yml` 时,可以轻松迁移到新服务器: + +```bash +# 源服务器 +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz sub2api-deploy/ + +# 传输到新服务器 +scp sub2api-complete.tar.gz user@new-server:/path/ + +# 新服务器 +tar xzf sub2api-complete.tar.gz +cd sub2api-deploy/ +docker-compose -f docker-compose.local.yml up -d ``` #### 常用命令 ```bash # 停止所有服务 -docker-compose down +docker-compose -f docker-compose.local.yml down # 重启 -docker-compose restart +docker-compose -f docker-compose.local.yml restart # 查看所有日志 -docker-compose logs -f +docker-compose -f docker-compose.local.yml logs -f + +# 删除所有数据(谨慎!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ ``` --- diff --git a/deploy/.gitignore b/deploy/.gitignore new file mode 100644 index 00000000..29a15135 --- /dev/null +++ b/deploy/.gitignore @@ -0,0 +1,19 @@ +# ============================================================================= +# Sub2API Deploy Directory - Git Ignore +# ============================================================================= + +# Data directories (generated at runtime when using docker-compose.local.yml) +data/ +postgres_data/ +redis_data/ + +# Environment configuration (contains sensitive information) +.env + +# Backup files +*.backup +*.bak + +# Temporary files +*.tmp +*.log diff --git a/deploy/README.md b/deploy/README.md index ed4ea721..091d8ad7 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -13,7 +13,9 @@ This directory contains files for deploying Sub2API on Linux servers. 
| File | Description | |------|-------------| -| `docker-compose.yml` | Docker Compose configuration | +| `docker-compose.yml` | Docker Compose configuration (named volumes) | +| `docker-compose.local.yml` | Docker Compose configuration (local directories, easy migration) | +| `docker-deploy.sh` | **One-click Docker deployment script (recommended)** | | `.env.example` | Docker environment variables template | | `DOCKER.md` | Docker Hub documentation | | `install.sh` | One-click binary installation script | @@ -24,7 +26,45 @@ This directory contains files for deploying Sub2API on Linux servers. ## Docker Deployment (Recommended) -### Quick Start +### Method 1: One-Click Deployment (Recommended) + +Use the automated preparation script for the easiest setup: + +```bash +# Download and run the preparation script +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# Or download first, then run +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh -o docker-deploy.sh +chmod +x docker-deploy.sh +./docker-deploy.sh +``` + +**What the script does:** +- Downloads `docker-compose.local.yml` and `.env.example` +- Automatically generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +- Creates `.env` file with generated secrets +- Creates necessary data directories (data/, postgres_data/, redis_data/) +- **Displays generated credentials** (POSTGRES_PASSWORD, JWT_SECRET, etc.) + +**After running the script:** +```bash +# Start services +docker-compose -f docker-compose.local.yml up -d + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api + +# If admin password was auto-generated, find it in logs: +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" + +# Access Web UI +# http://localhost:8080 +``` + +### Method 2: Manual Deployment + +If you prefer manual control: ```bash # Clone repository @@ -33,18 +73,36 @@ cd sub2api/deploy # Configure environment cp .env.example .env -nano .env # Set POSTGRES_PASSWORD (required) +nano .env # Set POSTGRES_PASSWORD and other required variables -# Start all services -docker-compose up -d +# Generate secure secrets (recommended) +JWT_SECRET=$(openssl rand -hex 32) +TOTP_ENCRYPTION_KEY=$(openssl rand -hex 32) +echo "JWT_SECRET=${JWT_SECRET}" >> .env +echo "TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}" >> .env + +# Create data directories +mkdir -p data postgres_data redis_data + +# Start all services using local directory version +docker-compose -f docker-compose.local.yml up -d # View logs (check for auto-generated admin password) -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api # Access Web UI # http://localhost:8080 ``` +### Deployment Version Comparison + +| Version | Data Storage | Migration | Best For | +|---------|-------------|-----------|----------| +| **docker-compose.local.yml** | Local directories (./data, ./postgres_data, ./redis_data) | ✅ Easy (tar entire directory) | Production, need frequent backups/migration | +| **docker-compose.yml** | Named volumes (/var/lib/docker/volumes/) | ⚠️ Requires docker commands | Simple setup, don't need migration | + +**Recommendation:** Use `docker-compose.local.yml` (deployed by `docker-deploy.sh`) for easier data management and migration. 
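A quick sanity check after the script finishes and the stack is up (a minimal sketch, assuming the default port and that you run it from the deployment directory):

```bash
# docker-deploy.sh should have left a compose file plus a .env locked to mode 600
ls -l docker-compose.local.yml .env
ls -ld data postgres_data redis_data

# All three containers should be up, and the app's /health endpoint should answer
docker-compose -f docker-compose.local.yml ps
curl -fsS http://localhost:8080/health && echo OK
```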
+ ### How Auto-Setup Works When using Docker Compose with `AUTO_SETUP=true`: @@ -89,6 +147,32 @@ SELECT ### Commands +For **local directory version** (docker-compose.local.yml): + +```bash +# Start services +docker-compose -f docker-compose.local.yml up -d + +# Stop services +docker-compose -f docker-compose.local.yml down + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api + +# Restart Sub2API only +docker-compose -f docker-compose.local.yml restart sub2api + +# Update to latest version +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d + +# Remove all data (caution!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ +``` + +For **named volumes version** (docker-compose.yml): + ```bash # Start services docker-compose up -d @@ -115,10 +199,11 @@ docker-compose down -v | Variable | Required | Default | Description | |----------|----------|---------|-------------| | `POSTGRES_PASSWORD` | **Yes** | - | PostgreSQL password | +| `JWT_SECRET` | **Recommended** | *(auto-generated)* | JWT secret (fixed for persistent sessions) | +| `TOTP_ENCRYPTION_KEY` | **Recommended** | *(auto-generated)* | TOTP encryption key (fixed for persistent 2FA) | | `SERVER_PORT` | No | `8080` | Server port | | `ADMIN_EMAIL` | No | `admin@sub2api.local` | Admin email | | `ADMIN_PASSWORD` | No | *(auto-generated)* | Admin password | -| `JWT_SECRET` | No | *(auto-generated)* | JWT secret | | `TZ` | No | `Asia/Shanghai` | Timezone | | `GEMINI_OAUTH_CLIENT_ID` | No | *(builtin)* | Google OAuth client ID (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. | | `GEMINI_OAUTH_CLIENT_SECRET` | No | *(builtin)* | Google OAuth client secret (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. | @@ -127,6 +212,30 @@ docker-compose down -v See `.env.example` for all available options. +> **Note:** The `docker-deploy.sh` script automatically generates `JWT_SECRET`, `TOTP_ENCRYPTION_KEY`, and `POSTGRES_PASSWORD` for you. + +### Easy Migration (Local Directory Version) + +When using `docker-compose.local.yml`, all data is stored in local directories, making migration simple: + +```bash +# On source server: Stop services and create archive +cd /path/to/deployment +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz deployment/ + +# Transfer to new server +scp sub2api-complete.tar.gz user@new-server:/path/to/destination/ + +# On new server: Extract and start +tar xzf sub2api-complete.tar.gz +cd deployment/ +docker-compose -f docker-compose.local.yml up -d +``` + +Your entire deployment (configuration + data) is migrated! + --- ## Gemini OAuth Configuration @@ -359,6 +468,30 @@ The main config file is at `/etc/sub2api/config.yaml` (created by Setup Wizard). 
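For the binary installation, the running service and the wizard-generated config can be inspected the usual systemd way (a sketch; the `sub2api` unit name is an assumption based on the install script):

```bash
# Check service state, recent logs, and the generated configuration
systemctl status sub2api
journalctl -u sub2api -n 50 --no-pager
sudo cat /etc/sub2api/config.yaml   # created by the Setup Wizard
```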
### Docker +For **local directory version**: + +```bash +# Check container status +docker-compose -f docker-compose.local.yml ps + +# View detailed logs +docker-compose -f docker-compose.local.yml logs --tail=100 sub2api + +# Check database connection +docker-compose -f docker-compose.local.yml exec postgres pg_isready + +# Check Redis connection +docker-compose -f docker-compose.local.yml exec redis redis-cli ping + +# Restart all services +docker-compose -f docker-compose.local.yml restart + +# Check data directories +ls -la data/ postgres_data/ redis_data/ +``` + +For **named volumes version**: + ```bash # Check container status docker-compose ps diff --git a/deploy/docker-compose.local.yml b/deploy/docker-compose.local.yml new file mode 100644 index 00000000..05ce129a --- /dev/null +++ b/deploy/docker-compose.local.yml @@ -0,0 +1,222 @@ +# ============================================================================= +# Sub2API Docker Compose - Local Directory Version +# ============================================================================= +# This configuration uses local directories for data storage instead of named +# volumes, making it easy to migrate the entire deployment by simply copying +# the deploy directory. +# +# Quick Start: +# 1. Copy .env.example to .env and configure +# 2. mkdir -p data postgres_data redis_data +# 3. docker-compose -f docker-compose.local.yml up -d +# 4. Check logs: docker-compose -f docker-compose.local.yml logs -f sub2api +# 5. Access: http://localhost:8080 +# +# Migration to New Server: +# 1. docker-compose -f docker-compose.local.yml down +# 2. tar czf sub2api-deploy.tar.gz deploy/ +# 3. Transfer to new server and extract +# 4. docker-compose -f docker-compose.local.yml up -d +# ============================================================================= + +services: + # =========================================================================== + # Sub2API Application + # =========================================================================== + sub2api: + image: weishaw/sub2api:latest + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + # Local directory mapping for easy migration + - ./data:/app/data + # Optional: Mount custom config.yaml (uncomment and create the file first) + # Copy config.example.yaml to config.yaml, modify it, then uncomment: + # - ./config.yaml:/app/data/config.yaml:ro + environment: + # ======================================================================= + # Auto Setup (REQUIRED for Docker deployment) + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) + # ======================================================================= + - DATABASE_HOST=postgres + - DATABASE_PORT=5432 + - DATABASE_USER=${POSTGRES_USER:-sub2api} + - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} + - DATABASE_SSLMODE=disable + + # 
======================================================================= + # Redis Configuration + # ======================================================================= + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false} + + # ======================================================================= + # Admin Account (auto-created on first run) + # ======================================================================= + - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + # IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being + # invalidated after container restarts. If left empty, a random secret + # will be generated on each startup. + # Generate a secure secret: openssl rand -hex 32 + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # TOTP (2FA) Configuration + # ======================================================================= + # IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, + # a random key will be generated on each startup, causing all existing + # TOTP configurations to become invalid (users won't be able to login + # with 2FA). + # Generate a secure key: openssl rand -hex 32 + - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-} + + # ======================================================================= + # Timezone Configuration + # This affects ALL time operations in the application: + # - Database timestamps + # - Usage statistics "today" boundary + # - Subscription expiry times + # - Log timestamps + # Common values: Asia/Shanghai, America/New_York, Europe/London, UTC + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (for Gemini accounts) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + + # ======================================================================= + # Security Configuration (URL Allowlist) + # ======================================================================= + # Enable URL allowlist validation (false to skip allowlist checks) + - SECURITY_URL_ALLOWLIST_ENABLED=${SECURITY_URL_ALLOWLIST_ENABLED:-false} + # Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) + - SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=${SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP:-false} + # Allow private IP addresses for upstream/pricing/CRS (for internal deployments) + - SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-false} + # Upstream hosts whitelist (comma-separated, only used when enabled=true) + - SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS=${SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS:-} + + # ======================================================================= + # Update Configuration (在线更新配置) + # 
======================================================================= + # Proxy for accessing GitHub (online updates + pricing data) + # Examples: http://host:port, socks5://host:port + - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-} + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + networks: + - sub2api-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + + # =========================================================================== + # PostgreSQL Database + # =========================================================================== + postgres: + image: postgres:18-alpine + container_name: sub2api-postgres + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + # Local directory mapping for easy migration + - ./postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_USER:-sub2api} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - POSTGRES_DB=${POSTGRES_DB:-sub2api} + - PGDATA=/var/lib/postgresql/data + - TZ=${TZ:-Asia/Shanghai} + networks: + - sub2api-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + # 注意:不暴露端口到宿主机,应用通过内部网络连接 + # 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"] + + # =========================================================================== + # Redis Cache + # =========================================================================== + redis: + image: redis:8-alpine + container_name: sub2api-redis + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + # Local directory mapping for easy migration + - ./redis_data:/data + command: > + sh -c ' + redis-server + --save 60 1 + --appendonly yes + --appendfsync everysec + ${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}' + environment: + - TZ=${TZ:-Asia/Shanghai} + # REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag) + - REDISCLI_AUTH=${REDIS_PASSWORD:-} + networks: + - sub2api-network + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 5s + +# ============================================================================= +# Networks +# ============================================================================= +networks: + sub2api-network: + driver: bridge diff --git a/deploy/docker-deploy.sh b/deploy/docker-deploy.sh new file mode 100644 index 00000000..1e4ce81f --- /dev/null +++ b/deploy/docker-deploy.sh @@ -0,0 +1,171 @@ +#!/bin/bash +# ============================================================================= +# Sub2API Docker Deployment Preparation Script +# ============================================================================= +# This script prepares deployment files for Sub2API: +# - Downloads docker-compose.local.yml and .env.example +# - Generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +# - Creates necessary data directories +# +# After running this script, you can start services with: +# docker-compose -f docker-compose.local.yml up -d +# ============================================================================= + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# GitHub raw content base URL 
+GITHUB_RAW_URL="https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy" + +# Print colored message +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Generate random secret +generate_secret() { + openssl rand -hex 32 +} + +# Check if command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Main installation function +main() { + echo "" + echo "==========================================" + echo " Sub2API Deployment Preparation" + echo "==========================================" + echo "" + + # Check if openssl is available + if ! command_exists openssl; then + print_error "openssl is not installed. Please install openssl first." + exit 1 + fi + + # Check if deployment already exists + if [ -f "docker-compose.local.yml" ] && [ -f ".env" ]; then + print_warning "Deployment files already exist in current directory." + read -p "Overwrite existing files? (y/N): " -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + print_info "Cancelled." + exit 0 + fi + fi + + # Download docker-compose.local.yml + print_info "Downloading docker-compose.local.yml..." + if command_exists curl; then + curl -sSL "${GITHUB_RAW_URL}/docker-compose.local.yml" -o docker-compose.local.yml + elif command_exists wget; then + wget -q "${GITHUB_RAW_URL}/docker-compose.local.yml" -O docker-compose.local.yml + else + print_error "Neither curl nor wget is installed. Please install one of them." + exit 1 + fi + print_success "Downloaded docker-compose.local.yml" + + # Download .env.example + print_info "Downloading .env.example..." + if command_exists curl; then + curl -sSL "${GITHUB_RAW_URL}/.env.example" -o .env.example + else + wget -q "${GITHUB_RAW_URL}/.env.example" -O .env.example + fi + print_success "Downloaded .env.example" + + # Generate .env file with auto-generated secrets + print_info "Generating secure secrets..." + echo "" + + # Generate secrets + JWT_SECRET=$(generate_secret) + TOTP_ENCRYPTION_KEY=$(generate_secret) + POSTGRES_PASSWORD=$(generate_secret) + + # Create .env from .env.example + cp .env.example .env + + # Update .env with generated secrets (cross-platform compatible) + if sed --version >/dev/null 2>&1; then + # GNU sed (Linux) + sed -i "s/^JWT_SECRET=.*/JWT_SECRET=${JWT_SECRET}/" .env + sed -i "s/^TOTP_ENCRYPTION_KEY=.*/TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}/" .env + sed -i "s/^POSTGRES_PASSWORD=.*/POSTGRES_PASSWORD=${POSTGRES_PASSWORD}/" .env + else + # BSD sed (macOS) + sed -i '' "s/^JWT_SECRET=.*/JWT_SECRET=${JWT_SECRET}/" .env + sed -i '' "s/^TOTP_ENCRYPTION_KEY=.*/TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}/" .env + sed -i '' "s/^POSTGRES_PASSWORD=.*/POSTGRES_PASSWORD=${POSTGRES_PASSWORD}/" .env + fi + + # Create data directories + print_info "Creating data directories..." + mkdir -p data postgres_data redis_data + print_success "Created data directories" + + # Set secure permissions for .env file (readable/writable only by owner) + chmod 600 .env + echo "" + + # Display completion message + echo "==========================================" + echo " Preparation Complete!" + echo "==========================================" + echo "" + echo "Generated secure credentials:" + echo " POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}" + echo " JWT_SECRET: ${JWT_SECRET}" + echo " TOTP_ENCRYPTION_KEY: ${TOTP_ENCRYPTION_KEY}" + echo "" + print_warning "These credentials have been saved to .env file." 
+ print_warning "Please keep them secure and do not share publicly!" + echo "" + echo "Directory structure:" + echo " docker-compose.local.yml - Docker Compose configuration" + echo " .env - Environment variables (generated secrets)" + echo " .env.example - Example template (for reference)" + echo " data/ - Application data (will be created on first run)" + echo " postgres_data/ - PostgreSQL data" + echo " redis_data/ - Redis data" + echo "" + echo "Next steps:" + echo " 1. (Optional) Edit .env to customize configuration" + echo " 2. Start services:" + echo " docker-compose -f docker-compose.local.yml up -d" + echo "" + echo " 3. View logs:" + echo " docker-compose -f docker-compose.local.yml logs -f sub2api" + echo "" + echo " 4. Access Web UI:" + echo " http://localhost:8080" + echo "" + print_info "If admin password is not set in .env, it will be auto-generated." + print_info "Check logs for the generated admin password on first startup." + echo "" +} + +# Run main function +main "$@" From 426ce616c0628a9208825a8acbc1b3651f368157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B0=8F=E5=8C=97?= Date: Mon, 2 Feb 2026 17:41:27 +0800 Subject: [PATCH 071/214] =?UTF-8?q?feat:=20=E6=94=AF=E6=8C=81=E5=9C=A8?= =?UTF-8?q?=E7=94=A8=E6=88=B7=E6=90=9C=E7=B4=A2=E4=B8=AD=E4=BD=BF=E7=94=A8?= =?UTF-8?q?=E5=A4=87=E6=B3=A8=E5=AD=97=E6=AE=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 在用户仓库的搜索过滤器中添加备注字段 - 管理员现在可以通过备注/标记搜索用户 - 使用不区分大小写的搜索(ContainsFold) Changes: - backend/internal/repository/user_repo.go: 添加 NotesContainsFold 到搜索条件 --- backend/internal/repository/user_repo.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/internal/repository/user_repo.go b/backend/internal/repository/user_repo.go index fe5b645c..654bd16b 100644 --- a/backend/internal/repository/user_repo.go +++ b/backend/internal/repository/user_repo.go @@ -190,6 +190,7 @@ func (r *userRepository) ListWithFilters(ctx context.Context, params pagination. 
dbuser.Or( dbuser.EmailContainsFold(filters.Search), dbuser.UsernameContainsFold(filters.Search), + dbuser.NotesContainsFold(filters.Search), ), ) } From ae18397ca62132ea77c68e6a0ca8fc5ebc86e784 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B0=8F=E5=8C=97?= Date: Mon, 2 Feb 2026 17:44:50 +0800 Subject: [PATCH 072/214] =?UTF-8?q?feat:=20=E5=90=91=E7=94=A8=E6=88=B7?= =?UTF-8?q?=E6=98=BE=E7=A4=BA=E7=AE=A1=E7=90=86=E5=91=98=E8=B0=83=E6=95=B4?= =?UTF-8?q?=E4=BD=99=E9=A2=9D=E7=9A=84=E5=A4=87=E6=B3=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 为RedeemCode DTO添加notes字段(仅用于admin_balance/admin_concurrency类型) - 更新mapper使其有条件地包含备注信息 - 在用户兑换历史UI中显示备注 - 备注以斜体显示,悬停时显示完整内容 用户现在可以看到管理员调整其余额的原因说明。 Changes: - backend/internal/handler/dto/types.go: RedeemCode添加notes字段 - backend/internal/handler/dto/mappers.go: 条件性填充notes - frontend/src/api/redeem.ts: TypeScript接口添加notes - frontend/src/views/user/RedeemView.vue: UI显示备注信息 --- backend/internal/handler/dto/mappers.go | 10 +++++++++- backend/internal/handler/dto/types.go | 4 ++++ frontend/src/api/redeem.ts | 4 +++- frontend/src/views/user/RedeemView.vue | 8 ++++++++ 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index d58a8a29..886a5535 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -321,7 +321,7 @@ func RedeemCodeFromServiceAdmin(rc *service.RedeemCode) *AdminRedeemCode { } func redeemCodeFromServiceBase(rc *service.RedeemCode) RedeemCode { - return RedeemCode{ + out := RedeemCode{ ID: rc.ID, Code: rc.Code, Type: rc.Type, @@ -335,6 +335,14 @@ func redeemCodeFromServiceBase(rc *service.RedeemCode) RedeemCode { User: UserFromServiceShallow(rc.User), Group: GroupFromServiceShallow(rc.Group), } + + // For admin_balance/admin_concurrency types, include notes so users can see + // why they were charged or credited by admin + if (rc.Type == "admin_balance" || rc.Type == "admin_concurrency") && rc.Notes != "" { + out.Notes = &rc.Notes + } + + return out } // AccountSummaryFromService returns a minimal AccountSummary for usage log display. diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 938d707c..4cfaef5f 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -198,6 +198,10 @@ type RedeemCode struct { GroupID *int64 `json:"group_id"` ValidityDays int `json:"validity_days"` + // Notes is only populated for admin_balance/admin_concurrency types + // so users can see why they were charged or credited + Notes *string `json:"notes,omitempty"` + User *User `json:"user,omitempty"` Group *Group `json:"group,omitempty"` } diff --git a/frontend/src/api/redeem.ts b/frontend/src/api/redeem.ts index 9e1c7d94..22abf4d8 100644 --- a/frontend/src/api/redeem.ts +++ b/frontend/src/api/redeem.ts @@ -14,7 +14,9 @@ export interface RedeemHistoryItem { status: string used_at: string created_at: string - // 订阅类型专用字段 + // Notes from admin for admin_balance/admin_concurrency types + notes?: string + // Subscription-specific fields group_id?: number validity_days?: number group?: { diff --git a/frontend/src/views/user/RedeemView.vue b/frontend/src/views/user/RedeemView.vue index 96158596..5850c084 100644 --- a/frontend/src/views/user/RedeemView.vue +++ b/frontend/src/views/user/RedeemView.vue @@ -312,6 +312,14 @@

[RedeemView.vue template hunk truncated in extraction: after the {{ t('redeem.adminAdjustment') }} badge, the added block renders {{ item.notes }} in italics and shows the full note on hover.]
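For illustration, a redemption-history entry produced by an admin balance adjustment would then carry the optional notes field (a hypothetical sample — the values are invented, and the field set follows the `RedeemHistoryItem` interface above):

```json
{
  "type": "admin_balance",
  "status": "used",
  "notes": "Goodwill credit for the 2026-01-30 service interruption",
  "used_at": "2026-02-02T17:44:50+08:00",
  "created_at": "2026-02-02T17:40:00+08:00"
}
```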

From c441638fc01ca9aeffb60133a2d459d53429ecf5 Mon Sep 17 00:00:00 2001 From: JIA-ss <627723154@qq.com> Date: Mon, 2 Feb 2026 18:30:06 +0800 Subject: [PATCH 073/214] =?UTF-8?q?feat(gateway):=20=E5=A2=9E=E5=BC=BA=20/?= =?UTF-8?q?v1/usage=20=E7=AB=AF=E7=82=B9=E8=BF=94=E5=9B=9E=E5=AE=8C?= =?UTF-8?q?=E6=95=B4=E7=94=A8=E9=87=8F=E7=BB=9F=E8=AE=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 为 CC Switch 集成增强 /v1/usage 网关端点,在保持原有 4 字段 (isValid, planName, remaining, unit) 向后兼容的基础上,新增: - usage 对象:今日/累计的请求数、token 用量、费用,以及 RPM/TPM - subscription 对象(订阅模式):日/周/月用量和限额、过期时间 - balance 字段(余额模式):当前钱包余额 用量数据获取采用 best-effort 策略,失败不影响基础响应。 Co-Authored-By: Claude Opus 4.5 --- backend/cmd/server/wire_gen.go | 2 +- backend/internal/handler/gateway_handler.go | 68 ++++++++++++++++++--- 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 7d465fee..fd4383bf 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -173,7 +173,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) - gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig) + gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, configConfig) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) totpHandler := handler.NewTotpHandler(totpService) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 70ea51bf..842242ca 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -30,6 +30,7 @@ type GatewayHandler struct { antigravityGatewayService *service.AntigravityGatewayService userService *service.UserService billingCacheService *service.BillingCacheService + usageService *service.UsageService concurrencyHelper *ConcurrencyHelper maxAccountSwitches int maxAccountSwitchesGemini int @@ -43,6 +44,7 @@ func NewGatewayHandler( userService *service.UserService, concurrencyService *service.ConcurrencyService, billingCacheService *service.BillingCacheService, + usageService *service.UsageService, cfg *config.Config, ) *GatewayHandler { pingInterval := time.Duration(0) @@ -63,6 +65,7 @@ func NewGatewayHandler( antigravityGatewayService: antigravityGatewayService, userService: userService, billingCacheService: billingCacheService, + usageService: usageService, concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval), maxAccountSwitches: maxAccountSwitches, 
maxAccountSwitchesGemini: maxAccountSwitchesGemini, @@ -524,7 +527,7 @@ func (h *GatewayHandler) AntigravityModels(c *gin.Context) { }) } -// Usage handles getting account balance for CC Switch integration +// Usage handles getting account balance and usage statistics for CC Switch integration // GET /v1/usage func (h *GatewayHandler) Usage(c *gin.Context) { apiKey, ok := middleware2.GetAPIKeyFromContext(c) @@ -539,7 +542,40 @@ func (h *GatewayHandler) Usage(c *gin.Context) { return } - // 订阅模式:返回订阅限额信息 + // Best-effort: 获取用量统计,失败不影响基础响应 + var usageData gin.H + if h.usageService != nil { + dashStats, err := h.usageService.GetUserDashboardStats(c.Request.Context(), subject.UserID) + if err == nil && dashStats != nil { + usageData = gin.H{ + "today": gin.H{ + "requests": dashStats.TodayRequests, + "input_tokens": dashStats.TodayInputTokens, + "output_tokens": dashStats.TodayOutputTokens, + "cache_creation_tokens": dashStats.TodayCacheCreationTokens, + "cache_read_tokens": dashStats.TodayCacheReadTokens, + "total_tokens": dashStats.TodayTokens, + "cost": dashStats.TodayCost, + "actual_cost": dashStats.TodayActualCost, + }, + "total": gin.H{ + "requests": dashStats.TotalRequests, + "input_tokens": dashStats.TotalInputTokens, + "output_tokens": dashStats.TotalOutputTokens, + "cache_creation_tokens": dashStats.TotalCacheCreationTokens, + "cache_read_tokens": dashStats.TotalCacheReadTokens, + "total_tokens": dashStats.TotalTokens, + "cost": dashStats.TotalCost, + "actual_cost": dashStats.TotalActualCost, + }, + "average_duration_ms": dashStats.AverageDurationMs, + "rpm": dashStats.Rpm, + "tpm": dashStats.Tpm, + } + } + } + + // 订阅模式:返回订阅限额信息 + 用量统计 if apiKey.Group != nil && apiKey.Group.IsSubscriptionType() { subscription, ok := middleware2.GetSubscriptionFromContext(c) if !ok { @@ -548,28 +584,46 @@ func (h *GatewayHandler) Usage(c *gin.Context) { } remaining := h.calculateSubscriptionRemaining(apiKey.Group, subscription) - c.JSON(http.StatusOK, gin.H{ + resp := gin.H{ "isValid": true, "planName": apiKey.Group.Name, "remaining": remaining, "unit": "USD", - }) + "subscription": gin.H{ + "daily_usage_usd": subscription.DailyUsageUSD, + "weekly_usage_usd": subscription.WeeklyUsageUSD, + "monthly_usage_usd": subscription.MonthlyUsageUSD, + "daily_limit_usd": apiKey.Group.DailyLimitUSD, + "weekly_limit_usd": apiKey.Group.WeeklyLimitUSD, + "monthly_limit_usd": apiKey.Group.MonthlyLimitUSD, + "expires_at": subscription.ExpiresAt, + }, + } + if usageData != nil { + resp["usage"] = usageData + } + c.JSON(http.StatusOK, resp) return } - // 余额模式:返回钱包余额 + // 余额模式:返回钱包余额 + 用量统计 latestUser, err := h.userService.GetByID(c.Request.Context(), subject.UserID) if err != nil { h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to get user info") return } - c.JSON(http.StatusOK, gin.H{ + resp := gin.H{ "isValid": true, "planName": "钱包余额", "remaining": latestUser.Balance, "unit": "USD", - }) + "balance": latestUser.Balance, + } + if usageData != nil { + resp["usage"] = usageData + } + c.JSON(http.StatusOK, resp) } // calculateSubscriptionRemaining 计算订阅剩余可用额度 From 673caf41a02946a6562848f0b887ffdeeed39e8c Mon Sep 17 00:00:00 2001 From: Zero Clover Date: Mon, 2 Feb 2026 18:50:54 +0800 Subject: [PATCH 074/214] =?UTF-8?q?feat(ops):=20=E5=B0=86=20USER=5FINACTIV?= =?UTF-8?q?E=20=E9=94=99=E8=AF=AF=E6=8E=92=E9=99=A4=E5=9C=A8=20SLA=20?= =?UTF-8?q?=E7=BB=9F=E8=AE=A1=E4=B9=8B=E5=A4=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 将账户停用 (USER_INACTIVE) 
导致的请求失败视为业务限制类错误,不计入 SLA 和错误率统计。 账户停用是预期内的业务结果,不应被视为系统错误或服务质量问题。此改动使错误分类更加准确,避免将预期的业务限制误报为系统故障。 修改内容: - 在 classifyOpsIsBusinessLimited 函数中添加 USER_INACTIVE 错误码 - 该类错误不再触发错误率告警 Fixes Wei-Shaw/sub2api#453 --- backend/internal/handler/ops_error_logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go index f62e6b3e..4dc0a9cc 100644 --- a/backend/internal/handler/ops_error_logger.go +++ b/backend/internal/handler/ops_error_logger.go @@ -905,7 +905,7 @@ func classifyOpsIsRetryable(errType string, statusCode int) bool { func classifyOpsIsBusinessLimited(errType, phase, code string, status int, message string) bool { switch strings.TrimSpace(code) { - case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID", "USER_INACTIVE": return true } if phase == "billing" || phase == "concurrency" { From 79fa18132b09d85445ba035320dc6d5efe9ee36b Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 2 Feb 2026 19:58:23 +0800 Subject: [PATCH 075/214] =?UTF-8?q?fix(gateway):=20=E4=BF=AE=E5=A4=8D=20OA?= =?UTF-8?q?uth=20token=20=E5=88=B7=E6=96=B0=E5=90=8E=E8=B0=83=E5=BA=A6?= =?UTF-8?q?=E5=99=A8=E7=BC=93=E5=AD=98=E4=B8=8D=E4=B8=80=E8=87=B4=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Token 刷新成功后,调度器缓存中的 Account 对象仍包含旧的 credentials, 导致在 Outbox 异步更新之前(最多 1 秒窗口)请求使用过期 token, 返回 403 错误(OAuth token has been revoked)。 修复方案:在 token 刷新成功后同步更新调度器缓存,确保调度获取的 Account 对象立即包含最新的 access_token 和 _token_version。 此修复覆盖所有 OAuth 平台:OpenAI、Claude、Gemini、Antigravity。 --- backend/cmd/server/wire_gen.go | 2 +- .../internal/service/token_refresh_service.go | 12 +++++++++++ .../service/token_refresh_service_test.go | 20 +++++++++---------- backend/internal/service/wire.go | 3 ++- 4 files changed, 25 insertions(+), 12 deletions(-) diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 7d465fee..e99979ef 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -188,7 +188,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) - tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, configConfig) + tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository) v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, 
usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 6ef92bbf..c33cbf48 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -18,6 +18,7 @@ type TokenRefreshService struct { refreshers []TokenRefresher cfg *config.TokenRefreshConfig cacheInvalidator TokenCacheInvalidator + schedulerCache SchedulerCache // 用于同步更新调度器缓存,解决 token 刷新后缓存不一致问题 stopCh chan struct{} wg sync.WaitGroup @@ -31,12 +32,14 @@ func NewTokenRefreshService( geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, cacheInvalidator TokenCacheInvalidator, + schedulerCache SchedulerCache, cfg *config.Config, ) *TokenRefreshService { s := &TokenRefreshService{ accountRepo: accountRepo, cfg: &cfg.TokenRefresh, cacheInvalidator: cacheInvalidator, + schedulerCache: schedulerCache, stopCh: make(chan struct{}), } @@ -198,6 +201,15 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc log.Printf("[TokenRefresh] Token cache invalidated for account %d", account.ID) } } + // 同步更新调度器缓存,确保调度获取的 Account 对象包含最新的 credentials + // 这解决了 token 刷新后调度器缓存数据不一致的问题(#445) + if s.schedulerCache != nil { + if err := s.schedulerCache.SetAccount(ctx, account); err != nil { + log.Printf("[TokenRefresh] Failed to sync scheduler cache for account %d: %v", account.ID, err) + } else { + log.Printf("[TokenRefresh] Scheduler cache synced for account %d", account.ID) + } + } return nil } diff --git a/backend/internal/service/token_refresh_service_test.go b/backend/internal/service/token_refresh_service_test.go index d23a0bb6..8e16c6f5 100644 --- a/backend/internal/service/token_refresh_service_test.go +++ b/backend/internal/service/token_refresh_service_test.go @@ -70,7 +70,7 @@ func TestTokenRefreshService_RefreshWithRetry_InvalidatesCache(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 5, Platform: PlatformGemini, @@ -98,7 +98,7 @@ func TestTokenRefreshService_RefreshWithRetry_InvalidatorErrorIgnored(t *testing RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 6, Platform: PlatformGemini, @@ -124,7 +124,7 @@ func TestTokenRefreshService_RefreshWithRetry_NilInvalidator(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, nil, cfg) account := &Account{ ID: 7, Platform: PlatformGemini, @@ -151,7 +151,7 @@ func TestTokenRefreshService_RefreshWithRetry_Antigravity(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 8, Platform: PlatformAntigravity, @@ -179,7 +179,7 @@ func TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + 
service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 9, Platform: PlatformGemini, @@ -207,7 +207,7 @@ func TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 10, Platform: PlatformOpenAI, // OpenAI OAuth 账户 @@ -235,7 +235,7 @@ func TestTokenRefreshService_RefreshWithRetry_UpdateFailed(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 11, Platform: PlatformGemini, @@ -264,7 +264,7 @@ func TestTokenRefreshService_RefreshWithRetry_RefreshFailed(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 12, Platform: PlatformGemini, @@ -291,7 +291,7 @@ func TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed(t *testin RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 13, Platform: PlatformAntigravity, @@ -318,7 +318,7 @@ func TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError(t *te RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 14, Platform: PlatformAntigravity, diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 096e15a0..4b721bb6 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -44,9 +44,10 @@ func ProvideTokenRefreshService( geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, cacheInvalidator TokenCacheInvalidator, + schedulerCache SchedulerCache, cfg *config.Config, ) *TokenRefreshService { - svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, cfg) + svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, schedulerCache, cfg) svc.Start() return svc } From ad1cdba338ef88e7f8c1d0a5360fca80d95a56a2 Mon Sep 17 00:00:00 2001 From: Zero Clover Date: Mon, 2 Feb 2026 20:16:17 +0800 Subject: [PATCH 076/214] =?UTF-8?q?feat(ops):=20=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E8=BF=87=E6=BB=A4=E6=97=A0=E6=95=88=20API=20Key=20=E9=94=99?= =?UTF-8?q?=E8=AF=AF=EF=BC=8C=E4=B8=8D=E5=86=99=E5=85=A5=E9=94=99=E8=AF=AF?= =?UTF-8?q?=E6=97=A5=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 新增 IgnoreInvalidApiKeyErrors 开关,启用后 INVALID_API_KEY 和 API_KEY_REQUIRED 错误将被完全跳过,不写入 Ops 错误日志。 这些错误由用户错误配置导致,与服务质量无关。 --- backend/internal/handler/ops_error_logger.go | 7 +++++++ backend/internal/service/ops_settings_models.go | 1 + frontend/src/i18n/locales/en.ts | 2 ++ frontend/src/i18n/locales/zh.ts | 4 +++- .../views/admin/ops/components/OpsSettingsDialog.vue | 10 ++++++++++ 5 files changed, 
23 insertions(+), 1 deletion(-) diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go index f62e6b3e..4d346842 100644 --- a/backend/internal/handler/ops_error_logger.go +++ b/backend/internal/handler/ops_error_logger.go @@ -1011,5 +1011,12 @@ func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message } } + // Check if invalid/missing API key errors should be ignored (user misconfiguration) + if settings.IgnoreInvalidApiKeyErrors { + if strings.Contains(bodyLower, "invalid_api_key") || strings.Contains(bodyLower, "api_key_required") { + return true + } + } + return false } diff --git a/backend/internal/service/ops_settings_models.go b/backend/internal/service/ops_settings_models.go index df06f578..ecc62220 100644 --- a/backend/internal/service/ops_settings_models.go +++ b/backend/internal/service/ops_settings_models.go @@ -83,6 +83,7 @@ type OpsAdvancedSettings struct { IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"` IgnoreContextCanceled bool `json:"ignore_context_canceled"` IgnoreNoAvailableAccounts bool `json:"ignore_no_available_accounts"` + IgnoreInvalidApiKeyErrors bool `json:"ignore_invalid_api_key_errors"` AutoRefreshEnabled bool `json:"auto_refresh_enabled"` AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"` } diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index bb7defd8..1d53ddb6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -2792,6 +2792,8 @@ export default { ignoreContextCanceledHint: 'When enabled, client disconnect (context canceled) errors will not be written to the error log.', ignoreNoAvailableAccounts: 'Ignore no available accounts errors', ignoreNoAvailableAccountsHint: 'When enabled, "No available accounts" errors will not be written to the error log (not recommended; usually a config issue).', + ignoreInvalidApiKeyErrors: 'Ignore invalid API key errors', + ignoreInvalidApiKeyErrorsHint: 'When enabled, invalid or missing API key errors (INVALID_API_KEY, API_KEY_REQUIRED) will not be written to the error log.', autoRefresh: 'Auto Refresh', enableAutoRefresh: 'Enable auto refresh', enableAutoRefreshHint: 'Automatically refresh dashboard data at a fixed interval.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 2e6230b2..a0ed426e 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -2944,7 +2944,9 @@ export default { ignoreContextCanceled: '忽略客户端断连错误', ignoreContextCanceledHint: '启用后,客户端主动断开连接(context canceled)的错误将不会写入错误日志。', ignoreNoAvailableAccounts: '忽略无可用账号错误', - ignoreNoAvailableAccountsHint: '启用后,“No available accounts” 错误将不会写入错误日志(不推荐,这通常是配置问题)。', + ignoreNoAvailableAccountsHint: '启用后,"No available accounts" 错误将不会写入错误日志(不推荐,这通常是配置问题)。', + ignoreInvalidApiKeyErrors: '忽略无效 API Key 错误', + ignoreInvalidApiKeyErrorsHint: '启用后,无效或缺失 API Key 的错误(INVALID_API_KEY、API_KEY_REQUIRED)将不会写入错误日志。', autoRefresh: '自动刷新', enableAutoRefresh: '启用自动刷新', enableAutoRefreshHint: '自动刷新仪表板数据,启用后会定期拉取最新数据。', diff --git a/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue b/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue index 53ab6683..3bec6d0d 100644 --- a/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue +++ b/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue @@ -505,6 +505,16 @@ async function saveAllSettings() { + +
+ [settings dialog template — markup lost in extraction: the hunk adds a toggle
+ row for the new switch, labeled {{ t('admin.ops.settings.ignoreInvalidApiKeyErrors') }},
+ bound to the ignore_invalid_api_key_errors setting, with the hint
+ {{ t('admin.ops.settings.ignoreInvalidApiKeyErrorsHint') }}]
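For reference, the gate this patch adds is a plain case-insensitive substring
check on the upstream error body. A minimal standalone sketch of the same logic
(the function name and parameters here are illustrative, not part of the diff,
which adds the branch inline inside shouldSkipOpsErrorLog):

package handler

import "strings"

// shouldSkipInvalidAPIKey mirrors the new branch: with the switch enabled,
// error bodies mentioning INVALID_API_KEY or API_KEY_REQUIRED are skipped
// before they are ever written to the ops error log.
func shouldSkipInvalidAPIKey(ignoreInvalidAPIKeyErrors bool, body string) bool {
	if !ignoreInvalidAPIKeyErrors {
		return false
	}
	bodyLower := strings.ToLower(body)
	return strings.Contains(bodyLower, "invalid_api_key") ||
		strings.Contains(bodyLower, "api_key_required")
}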
From 7b1d63a7867e9f41ef06a4afb039ed14b1a026fd Mon Sep 17 00:00:00 2001 From: shaw Date: Mon, 2 Feb 2026 21:01:32 +0800 Subject: [PATCH 077/214] =?UTF-8?q?fix(types):=20=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E7=BC=BA=E5=A4=B1=E7=9A=84=20ignore=5Finvalid=5Fapi=5Fkey=5Fer?= =?UTF-8?q?rors=20=E7=B1=BB=E5=9E=8B=E5=AE=9A=E4=B9=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit OpsAdvancedSettings 接口缺少 ignore_invalid_api_key_errors 字段, 导致 TypeScript 编译报错。 --- frontend/src/api/admin/ops.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 6e048436..9e0444b1 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -776,6 +776,7 @@ export interface OpsAdvancedSettings { ignore_count_tokens_errors: boolean ignore_context_canceled: boolean ignore_no_available_accounts: boolean + ignore_invalid_api_key_errors: boolean auto_refresh_enabled: boolean auto_refresh_interval_seconds: number } From 45e1429ae8bd9ed1c32e6eced2a74e81457b062d Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Mon, 2 Feb 2026 16:37:22 +0800 Subject: [PATCH 078/214] =?UTF-8?q?feat(billing):=20=E6=B7=BB=E5=8A=A0=20G?= =?UTF-8?q?emini=20200K=20=E9=95=BF=E4=B8=8A=E4=B8=8B=E6=96=87=E5=8F=8C?= =?UTF-8?q?=E5=80=8D=E8=AE=A1=E8=B4=B9=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 CalculateCostWithLongContext 方法支持阈值双倍计费 - 新增 RecordUsageWithLongContext 方法专用于 Gemini 计费 - Gemini 超过 200K token 的部分按 2 倍费率计算 - 其他平台(Claude/OpenAI)完全不受影响 --- .../internal/handler/gemini_v1beta_handler.go | 21 ++- backend/internal/service/billing_service.go | 59 +++++++ backend/internal/service/gateway_service.go | 156 ++++++++++++++++++ 3 files changed, 227 insertions(+), 9 deletions(-) diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 32f83013..d1b19ede 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -366,18 +366,21 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { userAgent := c.GetHeader("User-Agent") clientIP := ip.GetClientIP(c) - // 6) record usage async + // 6) record usage async (Gemini 使用长上下文双倍计费) go func(result *service.ForwardResult, usedAccount *service.Account, ua, ip string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ - Result: result, - APIKey: apiKey, - User: apiKey.User, - Account: usedAccount, - Subscription: subscription, - UserAgent: ua, - IPAddress: ip, + + if err := h.gatewayService.RecordUsageWithLongContext(ctx, &service.RecordUsageLongContextInput{ + Result: result, + APIKey: apiKey, + User: apiKey.User, + Account: usedAccount, + Subscription: subscription, + UserAgent: ua, + IPAddress: ip, + LongContextThreshold: 200000, // Gemini 200K 阈值 + LongContextMultiplier: 2.0, // 超出部分双倍计费 }); err != nil { log.Printf("Record usage failed: %v", err) } diff --git a/backend/internal/service/billing_service.go b/backend/internal/service/billing_service.go index f2afc343..95e16c4e 100644 --- a/backend/internal/service/billing_service.go +++ b/backend/internal/service/billing_service.go @@ -241,6 +241,65 @@ func (s *BillingService) CalculateCostWithConfig(model string, tokens UsageToken return s.CalculateCost(model, tokens, multiplier) } +// CalculateCostWithLongContext 计算费用,支持长上下文双倍计费 +// 
threshold: 阈值(如 200000),超过此值的部分按 extraMultiplier 倍计费 +// extraMultiplier: 超出部分的倍率(如 2.0 表示双倍) +func (s *BillingService) CalculateCostWithLongContext(model string, tokens UsageTokens, rateMultiplier float64, threshold int, extraMultiplier float64) (*CostBreakdown, error) { + // 1. 先正常计算全部 token 的成本 + cost, err := s.CalculateCost(model, tokens, rateMultiplier) + if err != nil { + return nil, err + } + + // 2. 如果未启用长上下文计费或未超过阈值,直接返回 + if threshold <= 0 || extraMultiplier <= 1 { + return cost, nil + } + + // 计算总输入 token(缓存读取 + 新输入) + total := tokens.CacheReadTokens + tokens.InputTokens + if total <= threshold { + return cost, nil + } + + // 3. 拆分超出部分的 token + extra := total - threshold + var extraCacheTokens, extraInputTokens int + + if tokens.CacheReadTokens >= threshold { + // 缓存已超过阈值:超出的缓存 + 全部输入 + extraCacheTokens = tokens.CacheReadTokens - threshold + extraInputTokens = tokens.InputTokens + } else { + // 缓存未超过阈值:只有输入超出部分 + extraCacheTokens = 0 + extraInputTokens = extra + } + + // 4. 计算超出部分的成本(只算输入和缓存读取) + extraTokens := UsageTokens{ + InputTokens: extraInputTokens, + CacheReadTokens: extraCacheTokens, + } + extraCost, err := s.CalculateCost(model, extraTokens, 1.0) // 先按 1 倍算 + if err != nil { + return cost, nil // 出错时返回正常成本 + } + + // 5. 额外成本 = 超出部分成本 × (倍率 - 1) + extraRate := extraMultiplier - 1 + additionalInputCost := extraCost.InputCost * extraRate + additionalCacheCost := extraCost.CacheReadCost * extraRate + + // 6. 累加到总成本 + cost.InputCost += additionalInputCost + cost.CacheReadCost += additionalCacheCost + cost.TotalCost += additionalInputCost + additionalCacheCost + cost.ActualCost = cost.TotalCost * rateMultiplier + + return cost, nil +} + // ListSupportedModels 列出所有支持的模型(现在总是返回true,因为有模糊匹配) func (s *BillingService) ListSupportedModels() []string { models := make([]string, 0) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 7a901907..9125163a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -3606,6 +3606,162 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu return nil } +// RecordUsageLongContextInput 记录使用量的输入参数(支持长上下文双倍计费) +type RecordUsageLongContextInput struct { + Result *ForwardResult + APIKey *APIKey + User *User + Account *Account + Subscription *UserSubscription // 可选:订阅信息 + UserAgent string // 请求的 User-Agent + IPAddress string // 请求的客户端 IP 地址 + LongContextThreshold int // 长上下文阈值(如 200000) + LongContextMultiplier float64 // 超出阈值部分的倍率(如 2.0) +} + +// RecordUsageWithLongContext 记录使用量并扣费,支持长上下文双倍计费(用于 Gemini) +func (s *GatewayService) RecordUsageWithLongContext(ctx context.Context, input *RecordUsageLongContextInput) error { + result := input.Result + apiKey := input.APIKey + user := input.User + account := input.Account + subscription := input.Subscription + + // 获取费率倍数 + multiplier := s.cfg.Default.RateMultiplier + if apiKey.GroupID != nil && apiKey.Group != nil { + multiplier = apiKey.Group.RateMultiplier + } + + var cost *CostBreakdown + + // 根据请求类型选择计费方式 + if result.ImageCount > 0 { + // 图片生成计费 + var groupConfig *ImagePriceConfig + if apiKey.Group != nil { + groupConfig = &ImagePriceConfig{ + Price1K: apiKey.Group.ImagePrice1K, + Price2K: apiKey.Group.ImagePrice2K, + Price4K: apiKey.Group.ImagePrice4K, + } + } + cost = s.billingService.CalculateImageCost(result.Model, result.ImageSize, result.ImageCount, groupConfig, multiplier) + } else { + // Token 计费(使用长上下文计费方法) + tokens := UsageTokens{ + InputTokens: 
result.Usage.InputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + } + var err error + cost, err = s.billingService.CalculateCostWithLongContext(result.Model, tokens, multiplier, input.LongContextThreshold, input.LongContextMultiplier) + if err != nil { + log.Printf("Calculate cost failed: %v", err) + cost = &CostBreakdown{ActualCost: 0} + } + } + + // 判断计费方式:订阅模式 vs 余额模式 + isSubscriptionBilling := subscription != nil && apiKey.Group != nil && apiKey.Group.IsSubscriptionType() + billingType := BillingTypeBalance + if isSubscriptionBilling { + billingType = BillingTypeSubscription + } + + // 创建使用日志 + durationMs := int(result.Duration.Milliseconds()) + var imageSize *string + if result.ImageSize != "" { + imageSize = &result.ImageSize + } + accountRateMultiplier := account.BillingRateMultiplier() + usageLog := &UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: result.RequestID, + Model: result.Model, + InputTokens: result.Usage.InputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + InputCost: cost.InputCost, + OutputCost: cost.OutputCost, + CacheCreationCost: cost.CacheCreationCost, + CacheReadCost: cost.CacheReadCost, + TotalCost: cost.TotalCost, + ActualCost: cost.ActualCost, + RateMultiplier: multiplier, + AccountRateMultiplier: &accountRateMultiplier, + BillingType: billingType, + Stream: result.Stream, + DurationMs: &durationMs, + FirstTokenMs: result.FirstTokenMs, + ImageCount: result.ImageCount, + ImageSize: imageSize, + CreatedAt: time.Now(), + } + + // 添加 UserAgent + if input.UserAgent != "" { + usageLog.UserAgent = &input.UserAgent + } + + // 添加 IPAddress + if input.IPAddress != "" { + usageLog.IPAddress = &input.IPAddress + } + + // 添加分组和订阅关联 + if apiKey.GroupID != nil { + usageLog.GroupID = apiKey.GroupID + } + if subscription != nil { + usageLog.SubscriptionID = &subscription.ID + } + + inserted, err := s.usageLogRepo.Create(ctx, usageLog) + if err != nil { + log.Printf("Create usage log failed: %v", err) + } + + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + log.Printf("[SIMPLE MODE] Usage recorded (not billed): user=%d, tokens=%d", usageLog.UserID, usageLog.TotalTokens()) + s.deferredService.ScheduleLastUsedUpdate(account.ID) + return nil + } + + shouldBill := inserted || err != nil + + // 根据计费类型执行扣费 + if isSubscriptionBilling { + // 订阅模式:更新订阅用量(使用 TotalCost 原始费用,不考虑倍率) + if shouldBill && cost.TotalCost > 0 { + if err := s.userSubRepo.IncrementUsage(ctx, subscription.ID, cost.TotalCost); err != nil { + log.Printf("Increment subscription usage failed: %v", err) + } + // 异步更新订阅缓存 + s.billingCacheService.QueueUpdateSubscriptionUsage(user.ID, *apiKey.GroupID, cost.TotalCost) + } + } else { + // 余额模式:扣除用户余额(使用 ActualCost 考虑倍率后的费用) + if shouldBill && cost.ActualCost > 0 { + if err := s.userRepo.DeductBalance(ctx, user.ID, cost.ActualCost); err != nil { + log.Printf("Deduct balance failed: %v", err) + } + // 异步更新余额缓存 + s.billingCacheService.QueueDeductBalance(user.ID, cost.ActualCost) + } + } + + // Schedule batch update for account last_used_at + s.deferredService.ScheduleLastUsedUpdate(account.ID) + + return nil +} + // ForwardCountTokens 转发 count_tokens 请求到上游 API // 特点:不记录使用量、仅支持非流式响应 func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, account *Account, parsed 
*ParsedRequest) error { From b381e8ee73e3a362ed217dce48529aba76d849c4 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Mon, 2 Feb 2026 16:42:07 +0800 Subject: [PATCH 079/214] =?UTF-8?q?refactor(billing):=20=E7=AE=80=E5=8C=96?= =?UTF-8?q?=20CalculateCostWithLongContext=20=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 将 token 直接拆分为范围内和范围外两部分,分别调用 CalculateCost: - 范围内:正常计费 (rateMultiplier) - 范围外:双倍计费 (rateMultiplier × extraMultiplier) 代码更直观,便于理解和维护 --- backend/internal/service/billing_service.go | 81 ++++++++++++--------- 1 file changed, 46 insertions(+), 35 deletions(-) diff --git a/backend/internal/service/billing_service.go b/backend/internal/service/billing_service.go index 95e16c4e..db5a9708 100644 --- a/backend/internal/service/billing_service.go +++ b/backend/internal/service/billing_service.go @@ -244,60 +244,71 @@ func (s *BillingService) CalculateCostWithConfig(model string, tokens UsageToken // CalculateCostWithLongContext 计算费用,支持长上下文双倍计费 // threshold: 阈值(如 200000),超过此值的部分按 extraMultiplier 倍计费 // extraMultiplier: 超出部分的倍率(如 2.0 表示双倍) +// +// 示例:缓存 210k + 输入 10k = 220k,阈值 200k,倍率 2.0 +// 拆分为:范围内 (200k, 0) + 范围外 (10k, 10k) +// 范围内正常计费,范围外 × 2 计费 func (s *BillingService) CalculateCostWithLongContext(model string, tokens UsageTokens, rateMultiplier float64, threshold int, extraMultiplier float64) (*CostBreakdown, error) { - // 1. 先正常计算全部 token 的成本 - cost, err := s.CalculateCost(model, tokens, rateMultiplier) - if err != nil { - return nil, err - } - - // 2. 如果未启用长上下文计费或未超过阈值,直接返回 + // 未启用长上下文计费,直接走正常计费 if threshold <= 0 || extraMultiplier <= 1 { - return cost, nil + return s.CalculateCost(model, tokens, rateMultiplier) } // 计算总输入 token(缓存读取 + 新输入) total := tokens.CacheReadTokens + tokens.InputTokens if total <= threshold { - return cost, nil + return s.CalculateCost(model, tokens, rateMultiplier) } - // 3. 拆分超出部分的 token - extra := total - threshold - var extraCacheTokens, extraInputTokens int + // 拆分成范围内和范围外 + var inRangeCacheTokens, inRangeInputTokens int + var outRangeCacheTokens, outRangeInputTokens int if tokens.CacheReadTokens >= threshold { - // 缓存已超过阈值:超出的缓存 + 全部输入 - extraCacheTokens = tokens.CacheReadTokens - threshold - extraInputTokens = tokens.InputTokens + // 缓存已超过阈值:范围内只有缓存,范围外是超出的缓存+全部输入 + inRangeCacheTokens = threshold + inRangeInputTokens = 0 + outRangeCacheTokens = tokens.CacheReadTokens - threshold + outRangeInputTokens = tokens.InputTokens } else { - // 缓存未超过阈值:只有输入超出部分 - extraCacheTokens = 0 - extraInputTokens = extra + // 缓存未超过阈值:范围内是全部缓存+部分输入,范围外是剩余输入 + inRangeCacheTokens = tokens.CacheReadTokens + inRangeInputTokens = threshold - tokens.CacheReadTokens + outRangeCacheTokens = 0 + outRangeInputTokens = tokens.InputTokens - inRangeInputTokens } - // 4. 计算超出部分的成本(只算输入和缓存读取) - extraTokens := UsageTokens{ - InputTokens: extraInputTokens, - CacheReadTokens: extraCacheTokens, + // 范围内部分:正常计费 + inRangeTokens := UsageTokens{ + InputTokens: inRangeInputTokens, + OutputTokens: tokens.OutputTokens, // 输出只算一次 + CacheCreationTokens: tokens.CacheCreationTokens, + CacheReadTokens: inRangeCacheTokens, } - extraCost, err := s.CalculateCost(model, extraTokens, 1.0) // 先按 1 倍算 + inRangeCost, err := s.CalculateCost(model, inRangeTokens, rateMultiplier) if err != nil { - return cost, nil // 出错时返回正常成本 + return nil, err } - // 5. 
额外成本 = 超出部分成本 × (倍率 - 1) - extraRate := extraMultiplier - 1 - additionalInputCost := extraCost.InputCost * extraRate - additionalCacheCost := extraCost.CacheReadCost * extraRate + // 范围外部分:× extraMultiplier 计费 + outRangeTokens := UsageTokens{ + InputTokens: outRangeInputTokens, + CacheReadTokens: outRangeCacheTokens, + } + outRangeCost, err := s.CalculateCost(model, outRangeTokens, rateMultiplier*extraMultiplier) + if err != nil { + return inRangeCost, nil // 出错时返回范围内成本 + } - // 6. 累加到总成本 - cost.InputCost += additionalInputCost - cost.CacheReadCost += additionalCacheCost - cost.TotalCost += additionalInputCost + additionalCacheCost - cost.ActualCost = cost.TotalCost * rateMultiplier - - return cost, nil + // 合并成本 + return &CostBreakdown{ + InputCost: inRangeCost.InputCost + outRangeCost.InputCost, + OutputCost: inRangeCost.OutputCost, + CacheCreationCost: inRangeCost.CacheCreationCost, + CacheReadCost: inRangeCost.CacheReadCost + outRangeCost.CacheReadCost, + TotalCost: inRangeCost.TotalCost + outRangeCost.TotalCost, + ActualCost: inRangeCost.ActualCost + outRangeCost.ActualCost, + }, nil } // ListSupportedModels 列出所有支持的模型(现在总是返回true,因为有模糊匹配) From e1a4a7b8c0a28f0c9e5bbfa08f8eb32f14619750 Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Mon, 2 Feb 2026 16:46:25 +0800 Subject: [PATCH 080/214] =?UTF-8?q?feat(groups):=20=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E4=BB=8E=E5=85=B6=E4=BB=96=E5=88=86=E7=BB=84=E5=A4=8D=E5=88=B6?= =?UTF-8?q?=E8=B4=A6=E5=8F=B7=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 创建分组时可选择从已有分组复制账号 - 编辑分组时支持同步账号(全量替换操作) - 仅允许选择相同平台的源分组 - 添加完整的数据校验:去重、自引用检查、平台一致性检查 - 前端支持多选源分组,带提示说明操作行为 --- .../internal/handler/admin/group_handler.go | 72 ++++---- backend/internal/repository/group_repo.go | 58 +++++++ backend/internal/service/admin_service.go | 93 ++++++++++ backend/internal/service/group_service.go | 4 + frontend/src/i18n/locales/en.ts | 8 + frontend/src/i18n/locales/zh.ts | 8 + frontend/src/types/index.ts | 3 + frontend/src/views/admin/GroupsView.vue | 161 +++++++++++++++++- 8 files changed, 372 insertions(+), 35 deletions(-) diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index 926624d2..f93edbc8 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -43,6 +43,8 @@ type CreateGroupRequest struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` + // 从指定分组复制账号(创建后自动绑定) + CopyAccountsFromGroupIDs []int64 `json:"copy_accounts_from_group_ids"` } // UpdateGroupRequest represents update group request @@ -66,6 +68,8 @@ type UpdateGroupRequest struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled *bool `json:"model_routing_enabled"` + // 从指定分组复制账号(同步操作:先清空当前分组的账号绑定,再绑定源分组的账号) + CopyAccountsFromGroupIDs []int64 `json:"copy_accounts_from_group_ids"` } // List handles listing all groups with pagination @@ -155,22 +159,23 @@ func (h *GroupHandler) Create(c *gin.Context) { } group, err := h.adminService.CreateGroup(c.Request.Context(), &service.CreateGroupInput{ - Name: req.Name, - Description: req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - 
ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, - ModelRouting: req.ModelRouting, - ModelRoutingEnabled: req.ModelRoutingEnabled, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, + CopyAccountsFromGroupIDs: req.CopyAccountsFromGroupIDs, }) if err != nil { response.ErrorFrom(c, err) @@ -196,23 +201,24 @@ func (h *GroupHandler) Update(c *gin.Context) { } group, err := h.adminService.UpdateGroup(c.Request.Context(), groupID, &service.UpdateGroupInput{ - Name: req.Name, - Description: req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - Status: req.Status, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, - ModelRouting: req.ModelRouting, - ModelRoutingEnabled: req.ModelRoutingEnabled, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + Status: req.Status, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, + CopyAccountsFromGroupIDs: req.CopyAccountsFromGroupIDs, }) if err != nil { response.ErrorFrom(c, err) diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 5c4d6cf4..002e07da 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -425,3 +425,61 @@ func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int6 return counts, nil } + +// GetAccountIDsByGroupIDs 获取多个分组的所有账号 ID(去重) +func (r *groupRepository) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) { + if len(groupIDs) == 0 { + return nil, nil + } + + rows, err := r.sql.QueryContext( + ctx, + "SELECT DISTINCT account_id FROM account_groups WHERE group_id = ANY($1) ORDER BY account_id", + pq.Array(groupIDs), + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var accountIDs []int64 + for rows.Next() { + var accountID int64 + if err := rows.Scan(&accountID); err != nil { + return nil, err + } + accountIDs = append(accountIDs, accountID) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return accountIDs, nil +} + +// BindAccountsToGroup 将多个账号绑定到指定分组(批量插入,忽略已存在的绑定) +func (r *groupRepository) BindAccountsToGroup(ctx 
context.Context, groupID int64, accountIDs []int64) error { + if len(accountIDs) == 0 { + return nil + } + + // 使用 INSERT ... ON CONFLICT DO NOTHING 忽略已存在的绑定 + _, err := r.sql.ExecContext( + ctx, + `INSERT INTO account_groups (account_id, group_id, priority, created_at) + SELECT unnest($1::bigint[]), $2, 50, NOW() + ON CONFLICT (account_id, group_id) DO NOTHING`, + pq.Array(accountIDs), + groupID, + ) + if err != nil { + return err + } + + // 发送调度器事件 + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupID, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue bind accounts to group failed: group=%d err=%v", groupID, err) + } + + return nil +} diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 0afa0716..ef2d526b 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -110,6 +110,8 @@ type CreateGroupInput struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled bool // 是否启用模型路由 + // 从指定分组复制账号(创建分组后在同一事务内绑定) + CopyAccountsFromGroupIDs []int64 } type UpdateGroupInput struct { @@ -132,6 +134,8 @@ type UpdateGroupInput struct { // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 ModelRoutingEnabled *bool // 是否启用模型路由 + // 从指定分组复制账号(同步操作:先清空当前分组的账号绑定,再绑定源分组的账号) + CopyAccountsFromGroupIDs []int64 } type CreateAccountInput struct { @@ -572,6 +576,38 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn } } + // 如果指定了复制账号的源分组,先获取账号 ID 列表 + var accountIDsToCopy []int64 + if len(input.CopyAccountsFromGroupIDs) > 0 { + // 去重源分组 IDs + seen := make(map[int64]struct{}) + uniqueSourceGroupIDs := make([]int64, 0, len(input.CopyAccountsFromGroupIDs)) + for _, srcGroupID := range input.CopyAccountsFromGroupIDs { + if _, exists := seen[srcGroupID]; !exists { + seen[srcGroupID] = struct{}{} + uniqueSourceGroupIDs = append(uniqueSourceGroupIDs, srcGroupID) + } + } + + // 校验源分组的平台是否与新分组一致 + for _, srcGroupID := range uniqueSourceGroupIDs { + srcGroup, err := s.groupRepo.GetByIDLite(ctx, srcGroupID) + if err != nil { + return nil, fmt.Errorf("source group %d not found: %w", srcGroupID, err) + } + if srcGroup.Platform != platform { + return nil, fmt.Errorf("source group %d platform mismatch: expected %s, got %s", srcGroupID, platform, srcGroup.Platform) + } + } + + // 获取所有源分组的账号(去重) + var err error + accountIDsToCopy, err = s.groupRepo.GetAccountIDsByGroupIDs(ctx, uniqueSourceGroupIDs) + if err != nil { + return nil, fmt.Errorf("failed to get accounts from source groups: %w", err) + } + } + group := &Group{ Name: input.Name, Description: input.Description, @@ -593,6 +629,15 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err } + + // 如果有需要复制的账号,绑定到新分组 + if len(accountIDsToCopy) > 0 { + if err := s.groupRepo.BindAccountsToGroup(ctx, group.ID, accountIDsToCopy); err != nil { + return nil, fmt.Errorf("failed to bind accounts to new group: %w", err) + } + group.AccountCount = int64(len(accountIDsToCopy)) + } + return group, nil } @@ -728,6 +773,54 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd if err := s.groupRepo.Update(ctx, group); err != nil { return nil, err } + + // 如果指定了复制账号的源分组,同步绑定(替换当前分组的账号) + if len(input.CopyAccountsFromGroupIDs) > 0 { + // 去重源分组 IDs + seen := make(map[int64]struct{}) + uniqueSourceGroupIDs := make([]int64, 0, 
len(input.CopyAccountsFromGroupIDs)) + for _, srcGroupID := range input.CopyAccountsFromGroupIDs { + // 校验:源分组不能是自身 + if srcGroupID == id { + return nil, fmt.Errorf("cannot copy accounts from self") + } + // 去重 + if _, exists := seen[srcGroupID]; !exists { + seen[srcGroupID] = struct{}{} + uniqueSourceGroupIDs = append(uniqueSourceGroupIDs, srcGroupID) + } + } + + // 校验源分组的平台是否与当前分组一致 + for _, srcGroupID := range uniqueSourceGroupIDs { + srcGroup, err := s.groupRepo.GetByIDLite(ctx, srcGroupID) + if err != nil { + return nil, fmt.Errorf("source group %d not found: %w", srcGroupID, err) + } + if srcGroup.Platform != group.Platform { + return nil, fmt.Errorf("source group %d platform mismatch: expected %s, got %s", srcGroupID, group.Platform, srcGroup.Platform) + } + } + + // 获取所有源分组的账号(去重) + accountIDsToCopy, err := s.groupRepo.GetAccountIDsByGroupIDs(ctx, uniqueSourceGroupIDs) + if err != nil { + return nil, fmt.Errorf("failed to get accounts from source groups: %w", err) + } + + // 先清空当前分组的所有账号绑定 + if _, err := s.groupRepo.DeleteAccountGroupsByGroupID(ctx, id); err != nil { + return nil, fmt.Errorf("failed to clear existing account bindings: %w", err) + } + + // 再绑定源分组的账号 + if len(accountIDsToCopy) > 0 { + if err := s.groupRepo.BindAccountsToGroup(ctx, id, accountIDsToCopy); err != nil { + return nil, fmt.Errorf("failed to bind accounts to group: %w", err) + } + } + } + if s.authCacheInvalidator != nil { s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) } diff --git a/backend/internal/service/group_service.go b/backend/internal/service/group_service.go index 324f347b..a2bf2073 100644 --- a/backend/internal/service/group_service.go +++ b/backend/internal/service/group_service.go @@ -29,6 +29,10 @@ type GroupRepository interface { ExistsByName(ctx context.Context, name string) (bool, error) GetAccountCount(ctx context.Context, groupID int64) (int64, error) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) + // GetAccountIDsByGroupIDs 获取多个分组的所有账号 ID(去重) + GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) + // BindAccountsToGroup 将多个账号绑定到指定分组 + BindAccountsToGroup(ctx context.Context, groupID int64, accountIDs []int64) error } // CreateGroupRequest 创建分组请求 diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 1d53ddb6..7c4df36b 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1004,6 +1004,14 @@ export default { fallbackHint: 'Non-Claude Code requests will use this group. Leave empty to reject directly.', noFallback: 'No Fallback (Reject)' }, + copyAccounts: { + title: 'Copy Accounts from Groups', + tooltip: 'Select one or more groups of the same platform. After creation, all accounts from these groups will be automatically bound to the new group (deduplicated).', + tooltipEdit: 'Select one or more groups of the same platform. After saving, current group accounts will be replaced with accounts from these groups (deduplicated).', + selectPlaceholder: 'Select groups to copy accounts from...', + hint: 'Multiple groups can be selected, accounts will be deduplicated', + hintEdit: '⚠️ Warning: This will replace all existing account bindings' + }, modelRouting: { title: 'Model Routing', tooltip: 'Configure specific model requests to be routed to designated accounts. 
Supports wildcard matching, e.g., claude-opus-* matches all opus models.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index a0ed426e..ba1c775f 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1079,6 +1079,14 @@ export default { fallbackHint: '非 Claude Code 请求将使用此分组,留空则直接拒绝', noFallback: '不降级(直接拒绝)' }, + copyAccounts: { + title: '从分组复制账号', + tooltip: '选择一个或多个相同平台的分组,创建后会自动将这些分组的所有账号绑定到新分组(去重)。', + tooltipEdit: '选择一个或多个相同平台的分组,保存后当前分组的账号会被替换为这些分组的账号(去重)。', + selectPlaceholder: '选择分组以复制其账号...', + hint: '可选多个分组,账号会自动去重', + hintEdit: '⚠️ 注意:这会替换当前分组的所有账号绑定' + }, modelRouting: { title: '模型路由配置', tooltip: '配置特定模型请求优先路由到指定账号。支持通配符匹配,如 claude-opus-* 匹配所有 opus 模型。', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 9802d5c8..7c6cbf52 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -411,6 +411,8 @@ export interface CreateGroupRequest { image_price_4k?: number | null claude_code_only?: boolean fallback_group_id?: number | null + // 从指定分组复制账号 + copy_accounts_from_group_ids?: number[] } export interface UpdateGroupRequest { @@ -429,6 +431,7 @@ export interface UpdateGroupRequest { image_price_4k?: number | null claude_code_only?: boolean fallback_group_id?: number | null + copy_accounts_from_group_ids?: number[] } // ==================== Account & Proxy Types ==================== diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index 78ef2e48..bf924f53 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -240,9 +240,73 @@ v-model="createForm.platform" :options="platformOptions" data-tour="group-form-platform" + @change="createForm.copy_accounts_from_group_ids = []" />

{{ t('admin.groups.platformHint') }}
+ [create dialog template — markup lost in extraction: the hunk adds a "copy
+ accounts from groups" multi-select bound to createForm.copy_accounts_from_group_ids,
+ with tooltip {{ t('admin.groups.copyAccounts.tooltip') }}, selected groups
+ rendered as removable chips labeled
+ {{ copyAccountsGroupOptions.find(o => o.value === groupId)?.label || `#${groupId}` }},
+ and hint {{ t('admin.groups.copyAccounts.hint') }}]
{{ t('admin.groups.platformNotEditable') }}
+ [edit dialog template — markup lost in extraction: the hunk adds the same
+ multi-select bound to editForm.copy_accounts_from_group_ids, with options from
+ copyAccountsGroupOptionsForEdit, tooltip {{ t('admin.groups.copyAccounts.tooltipEdit') }},
+ chips labeled
+ {{ copyAccountsGroupOptionsForEdit.find(o => o.value === groupId)?.label || `#${groupId}` }},
+ and hint {{ t('admin.groups.copyAccounts.hintEdit') }}]
{ return options }) +// 复制账号的源分组选项(创建时)- 仅包含相同平台且有账号的分组 +const copyAccountsGroupOptions = computed(() => { + const eligibleGroups = groups.value.filter( + (g) => g.platform === createForm.platform && (g.account_count || 0) > 0 + ) + return eligibleGroups.map((g) => ({ + value: g.id, + label: `${g.name} (${g.account_count || 0} 个账号)` + })) +}) + +// 复制账号的源分组选项(编辑时)- 仅包含相同平台且有账号的分组,排除自身 +const copyAccountsGroupOptionsForEdit = computed(() => { + const currentId = editingGroup.value?.id + const eligibleGroups = groups.value.filter( + (g) => g.platform === editForm.platform && (g.account_count || 0) > 0 && g.id !== currentId + ) + return eligibleGroups.map((g) => ({ + value: g.id, + label: `${g.name} (${g.account_count || 0} 个账号)` + })) +}) + const groups = ref([]) const loading = ref(false) const searchQuery = ref('') @@ -1244,7 +1394,9 @@ const createForm = reactive({ claude_code_only: false, fallback_group_id: null as number | null, // 模型路由开关 - model_routing_enabled: false + model_routing_enabled: false, + // 从分组复制账号 + copy_accounts_from_group_ids: [] as number[] }) // 简单账号类型(用于模型路由选择) @@ -1415,7 +1567,9 @@ const editForm = reactive({ claude_code_only: false, fallback_group_id: null as number | null, // 模型路由开关 - model_routing_enabled: false + model_routing_enabled: false, + // 从分组复制账号 + copy_accounts_from_group_ids: [] as number[] }) // 根据分组类型返回不同的删除确认消息 @@ -1497,6 +1651,7 @@ const closeCreateModal = () => { createForm.image_price_4k = null createForm.claude_code_only = false createForm.fallback_group_id = null + createForm.copy_accounts_from_group_ids = [] createModelRoutingRules.value = [] } @@ -1547,6 +1702,7 @@ const handleEdit = async (group: AdminGroup) => { editForm.claude_code_only = group.claude_code_only || false editForm.fallback_group_id = group.fallback_group_id editForm.model_routing_enabled = group.model_routing_enabled || false + editForm.copy_accounts_from_group_ids = [] // 复制账号字段每次编辑时重置为空 // 加载模型路由规则(异步加载账号名称) editModelRoutingRules.value = await convertApiFormatToRoutingRules(group.model_routing) showEditModal.value = true @@ -1556,6 +1712,7 @@ const closeEditModal = () => { showEditModal.value = false editingGroup.value = null editModelRoutingRules.value = [] + editForm.copy_accounts_from_group_ids = [] } const handleUpdateGroup = async () => { From ce1d2904c7a049ceba8717bd43954addc95b90ad Mon Sep 17 00:00:00 2001 From: liuxiongfeng Date: Mon, 2 Feb 2026 22:01:41 +0800 Subject: [PATCH 081/214] =?UTF-8?q?test:=20=E4=B8=BA=E6=B5=8B=E8=AF=95=20s?= =?UTF-8?q?tub=20=E6=B7=BB=E5=8A=A0=E7=BC=BA=E5=A4=B1=E7=9A=84=20GroupRepo?= =?UTF-8?q?sitory=20=E6=8E=A5=E5=8F=A3=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 新增 BindAccountsToGroup 和 GetAccountIDsByGroupIDs 方法的 stub 实现, 确保测试文件中的 mock 类型满足 GroupRepository 接口要求。 --- backend/internal/repository/group_repo.go | 2 +- backend/internal/server/api_contract_test.go | 8 ++++++++ .../service/admin_service_delete_test.go | 8 ++++++++ .../internal/service/admin_service_group_test.go | 16 ++++++++++++++++ .../service/gateway_multiplatform_test.go | 8 ++++++++ .../service/gemini_multiplatform_test.go | 8 ++++++++ 6 files changed, 49 insertions(+), 1 deletion(-) diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 002e07da..a5b0512d 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -440,7 +440,7 @@ func (r *groupRepository) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs 
if err != nil { return nil, err } - defer rows.Close() + defer func() { _ = rows.Close() }() var accountIDs []int64 for rows.Next() { diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 22e6213e..6adab853 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -880,6 +880,14 @@ func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID i return 0, errors.New("not implemented") } +func (stubGroupRepo) BindAccountsToGroup(ctx context.Context, groupID int64, accountIDs []int64) error { + return errors.New("not implemented") +} + +func (stubGroupRepo) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) { + return nil, errors.New("not implemented") +} + type stubAccountRepo struct { bulkUpdateIDs []int64 } diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go index 6472ccbb..923d33ab 100644 --- a/backend/internal/service/admin_service_delete_test.go +++ b/backend/internal/service/admin_service_delete_test.go @@ -164,6 +164,14 @@ func (s *groupRepoStub) DeleteAccountGroupsByGroupID(ctx context.Context, groupI panic("unexpected DeleteAccountGroupsByGroupID call") } +func (s *groupRepoStub) BindAccountsToGroup(ctx context.Context, groupID int64, accountIDs []int64) error { + panic("unexpected BindAccountsToGroup call") +} + +func (s *groupRepoStub) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) { + panic("unexpected GetAccountIDsByGroupIDs call") +} + type proxyRepoStub struct { deleteErr error countErr error diff --git a/backend/internal/service/admin_service_group_test.go b/backend/internal/service/admin_service_group_test.go index e0574e2e..1daee89f 100644 --- a/backend/internal/service/admin_service_group_test.go +++ b/backend/internal/service/admin_service_group_test.go @@ -108,6 +108,14 @@ func (s *groupRepoStubForAdmin) DeleteAccountGroupsByGroupID(_ context.Context, panic("unexpected DeleteAccountGroupsByGroupID call") } +func (s *groupRepoStubForAdmin) BindAccountsToGroup(_ context.Context, _ int64, _ []int64) error { + panic("unexpected BindAccountsToGroup call") +} + +func (s *groupRepoStubForAdmin) GetAccountIDsByGroupIDs(_ context.Context, _ []int64) ([]int64, error) { + panic("unexpected GetAccountIDsByGroupIDs call") +} + // TestAdminService_CreateGroup_WithImagePricing 测试创建分组时 ImagePrice 字段正确传递 func TestAdminService_CreateGroup_WithImagePricing(t *testing.T) { repo := &groupRepoStubForAdmin{} @@ -378,3 +386,11 @@ func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int func (s *groupRepoStubForFallbackCycle) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { panic("unexpected DeleteAccountGroupsByGroupID call") } + +func (s *groupRepoStubForFallbackCycle) BindAccountsToGroup(_ context.Context, _ int64, _ []int64) error { + panic("unexpected BindAccountsToGroup call") +} + +func (s *groupRepoStubForFallbackCycle) GetAccountIDsByGroupIDs(_ context.Context, _ []int64) ([]int64, error) { + panic("unexpected GetAccountIDsByGroupIDs call") +} diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 26eb24e4..4bfa23d1 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -266,6 +266,14 @@ func (m *mockGroupRepoForGateway) 
DeleteAccountGroupsByGroupID(ctx context.Conte return 0, nil } +func (m *mockGroupRepoForGateway) BindAccountsToGroup(ctx context.Context, groupID int64, accountIDs []int64) error { + return nil +} + +func (m *mockGroupRepoForGateway) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) { + return nil, nil +} + func ptr[T any](v T) *T { return &v } diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index c63a020c..e7ed80fd 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -218,6 +218,14 @@ func (m *mockGroupRepoForGemini) DeleteAccountGroupsByGroupID(ctx context.Contex return 0, nil } +func (m *mockGroupRepoForGemini) BindAccountsToGroup(ctx context.Context, groupID int64, accountIDs []int64) error { + return nil +} + +func (m *mockGroupRepoForGemini) GetAccountIDsByGroupIDs(ctx context.Context, groupIDs []int64) ([]int64, error) { + return nil, nil +} + var _ GroupRepository = (*mockGroupRepoForGemini)(nil) // mockGatewayCacheForGemini Gemini 测试用的 cache mock From 0170d19fa7d9fdb5467dfbecb2dcef3372423066 Mon Sep 17 00:00:00 2001 From: song Date: Mon, 2 Feb 2026 22:13:50 +0800 Subject: [PATCH 082/214] merge upstream main --- .github/workflows/backend-ci.yml | 4 +- .github/workflows/release.yml | 5 +- .github/workflows/security-scan.yml | 2 +- Dockerfile | 2 +- README.md | 130 +- README_CN.md | 128 +- backend/cmd/server/VERSION | 2 +- backend/cmd/server/main.go | 19 + backend/cmd/server/wire.go | 12 + backend/cmd/server/wire_gen.go | 55 +- backend/ent/announcement.go | 249 + backend/ent/announcement/announcement.go | 164 + backend/ent/announcement/where.go | 624 ++ backend/ent/announcement_create.go | 1159 +++ backend/ent/announcement_delete.go | 88 + backend/ent/announcement_query.go | 643 ++ backend/ent/announcement_update.go | 824 ++ backend/ent/announcementread.go | 185 + .../ent/announcementread/announcementread.go | 127 + backend/ent/announcementread/where.go | 257 + backend/ent/announcementread_create.go | 660 ++ backend/ent/announcementread_delete.go | 88 + backend/ent/announcementread_query.go | 718 ++ backend/ent/announcementread_update.go | 456 ++ backend/ent/client.go | 511 +- backend/ent/ent.go | 6 + backend/ent/hook/hook.go | 36 + backend/ent/intercept/intercept.go | 90 + backend/ent/migrate/schema.go | 147 + backend/ent/mutation.go | 3047 +++++++- backend/ent/predicate/predicate.go | 9 + backend/ent/runtime/runtime.go | 94 + backend/ent/schema/account.go | 4 +- backend/ent/schema/announcement.go | 90 + backend/ent/schema/announcement_read.go | 65 + backend/ent/schema/api_key.go | 4 +- backend/ent/schema/group.go | 8 +- backend/ent/schema/mixins/soft_delete.go | 43 +- backend/ent/schema/promo_code.go | 4 +- backend/ent/schema/redeem_code.go | 6 +- backend/ent/schema/usage_cleanup_task.go | 75 + backend/ent/schema/user.go | 18 +- backend/ent/schema/user_subscription.go | 4 +- backend/ent/tx.go | 9 + backend/ent/usagecleanuptask.go | 236 + .../ent/usagecleanuptask/usagecleanuptask.go | 137 + backend/ent/usagecleanuptask/where.go | 620 ++ backend/ent/usagecleanuptask_create.go | 1190 +++ backend/ent/usagecleanuptask_delete.go | 88 + backend/ent/usagecleanuptask_query.go | 564 ++ backend/ent/usagecleanuptask_update.go | 702 ++ backend/ent/user.go | 73 +- backend/ent/user/user.go | 56 + backend/ent/user/where.go | 173 + backend/ent/user_create.go | 253 + backend/ent/user_query.go | 76 +- 
backend/ent/user_update.go | 301 + backend/go.mod | 2 +- backend/internal/config/config.go | 109 + backend/internal/config/config_test.go | 570 ++ backend/internal/domain/announcement.go | 226 + backend/internal/domain/constants.go | 64 + .../internal/handler/admin/account_handler.go | 145 +- .../admin/admin_basic_handlers_test.go | 262 + .../handler/admin/admin_helpers_test.go | 134 + .../handler/admin/admin_service_stub_test.go | 294 + .../handler/admin/announcement_handler.go | 246 + .../handler/admin/dashboard_handler.go | 28 +- .../internal/handler/admin/group_handler.go | 14 +- .../internal/handler/admin/redeem_handler.go | 12 +- .../internal/handler/admin/setting_handler.go | 155 +- .../handler/admin/subscription_handler.go | 34 +- .../admin/usage_cleanup_handler_test.go | 377 + .../internal/handler/admin/usage_handler.go | 194 +- .../internal/handler/admin/user_handler.go | 12 +- .../internal/handler/announcement_handler.go | 81 + backend/internal/handler/auth_handler.go | 191 +- backend/internal/handler/dto/announcement.go | 74 + backend/internal/handler/dto/mappers.go | 218 +- backend/internal/handler/dto/settings.go | 57 +- backend/internal/handler/dto/types.go | 131 +- backend/internal/handler/gateway_handler.go | 269 +- .../handler/gemini_cli_session_test.go | 122 + .../internal/handler/gemini_v1beta_handler.go | 86 +- backend/internal/handler/handler.go | 3 + .../handler/openai_gateway_handler.go | 4 +- backend/internal/handler/ops_error_logger.go | 9 +- backend/internal/handler/setting_handler.go | 32 +- backend/internal/handler/totp_handler.go | 181 + backend/internal/handler/user_handler.go | 6 - backend/internal/handler/wire.go | 9 + .../rate_limiter_integration_test.go | 44 + backend/internal/pkg/antigravity/oauth.go | 2 +- .../pkg/antigravity/request_transformer.go | 16 +- .../antigravity/request_transformer_test.go | 19 +- .../pkg/antigravity/response_transformer.go | 21 +- backend/internal/pkg/gemini/models.go | 11 +- backend/internal/pkg/geminicli/models.go | 4 +- backend/internal/pkg/oauth/oauth.go | 123 +- backend/internal/pkg/response/response.go | 11 +- backend/internal/pkg/tlsfingerprint/dialer.go | 568 ++ .../tlsfingerprint/dialer_integration_test.go | 278 + .../pkg/tlsfingerprint/dialer_test.go | 160 + .../internal/pkg/tlsfingerprint/registry.go | 171 + .../pkg/tlsfingerprint/registry_test.go | 243 + backend/internal/repository/account_repo.go | 99 +- .../account_repo_integration_test.go | 89 +- backend/internal/repository/aes_encryptor.go | 95 + .../repository/announcement_read_repo.go | 83 + .../internal/repository/announcement_repo.go | 194 + backend/internal/repository/api_key_cache.go | 50 +- backend/internal/repository/api_key_repo.go | 25 +- .../repository/claude_oauth_service.go | 28 +- .../repository/claude_oauth_service_test.go | 2 +- .../repository/claude_usage_service.go | 71 +- .../repository/dashboard_aggregation_repo.go | 69 + backend/internal/repository/email_cache.go | 58 +- backend/internal/repository/ent.go | 13 + backend/internal/repository/gateway_cache.go | 12 + .../gateway_cache_integration_test.go | 13 + .../gateway_routing_integration_test.go | 2 +- backend/internal/repository/http_upstream.go | 226 + backend/internal/repository/identity_cache.go | 28 +- .../repository/openai_oauth_service.go | 15 +- .../repository/openai_oauth_service_test.go | 7 + backend/internal/repository/ops_repo.go | 7 +- backend/internal/repository/redis.go | 12 +- backend/internal/repository/redis_test.go | 12 + .../internal/repository/req_client_pool.go | 7 
+- .../repository/req_client_pool_test.go | 90 + .../internal/repository/scheduler_cache.go | 4 +- ...eduler_snapshot_outbox_integration_test.go | 2 +- .../repository/session_limit_cache.go | 11 +- .../repository/simple_mode_default_groups.go | 82 + ...le_mode_default_groups_integration_test.go | 84 + backend/internal/repository/totp_cache.go | 149 + .../internal/repository/usage_cleanup_repo.go | 551 ++ .../repository/usage_cleanup_repo_ent_test.go | 251 + .../repository/usage_cleanup_repo_test.go | 482 ++ backend/internal/repository/usage_log_repo.go | 14 +- .../usage_log_repo_integration_test.go | 14 +- backend/internal/repository/user_repo.go | 45 + .../repository/user_subscription_repo.go | 53 +- ...user_subscription_repo_integration_test.go | 8 +- backend/internal/repository/wire.go | 7 + backend/internal/server/api_contract_test.go | 408 +- .../server/middleware/api_key_auth_test.go | 2 +- backend/internal/server/routes/admin.go | 19 +- backend/internal/server/routes/auth.go | 9 + backend/internal/server/routes/user.go | 18 + backend/internal/service/account.go | 84 + backend/internal/service/account_service.go | 1 - .../service/account_service_delete_test.go | 4 - .../internal/service/account_test_service.go | 6 +- .../internal/service/account_usage_service.go | 49 +- backend/internal/service/admin_service.go | 8 - .../service/admin_service_delete_test.go | 12 + backend/internal/service/announcement.go | 64 + .../internal/service/announcement_service.go | 378 + .../service/announcement_targeting_test.go | 66 + .../service/antigravity_gateway_service.go | 111 +- .../service/antigravity_model_mapping_test.go | 8 +- .../service/antigravity_oauth_service.go | 68 +- .../service/antigravity_rate_limit_test.go | 20 +- .../service/antigravity_token_provider.go | 36 +- .../service/antigravity_token_refresher.go | 23 +- .../service/api_key_auth_cache_impl.go | 16 + backend/internal/service/api_key_service.go | 4 + .../service/api_key_service_cache_test.go | 8 + .../service/api_key_service_delete_test.go | 8 + backend/internal/service/auth_service.go | 150 +- .../service/auth_service_register_test.go | 20 + .../internal/service/claude_token_provider.go | 47 +- .../service/dashboard_aggregation_service.go | 61 +- .../dashboard_aggregation_service_test.go | 4 + backend/internal/service/dashboard_service.go | 8 +- .../service/dashboard_service_test.go | 4 + backend/internal/service/domain_constants.go | 82 +- .../internal/service/email_queue_service.go | 37 +- backend/internal/service/email_service.go | 186 +- .../service/gateway_multiplatform_test.go | 1625 +++- backend/internal/service/gateway_service.go | 498 +- .../service/gemini_messages_compat_service.go | 443 +- .../service/gemini_multiplatform_test.go | 298 +- .../gemini_native_signature_cleaner.go | 72 + .../internal/service/gemini_token_provider.go | 36 +- .../internal/service/http_upstream_port.go | 25 + backend/internal/service/identity_service.go | 97 + backend/internal/service/oauth_service.go | 19 +- .../service/openai_codex_transform.go | 28 +- .../service/openai_codex_transform_test.go | 31 + .../service/openai_gateway_service.go | 472 +- .../service/openai_gateway_service_test.go | 649 +- .../internal/service/openai_oauth_service.go | 29 +- .../internal/service/openai_token_provider.go | 47 +- .../internal/service/openai_tool_corrector.go | 77 +- .../service/openai_tool_corrector_test.go | 19 +- .../internal/service/ops_settings_models.go | 1 + backend/internal/service/pricing_service.go | 4 +- 
backend/internal/service/ratelimit_service.go | 167 +- .../service/ratelimit_service_openai_test.go | 364 + .../internal/service/session_limit_cache.go | 3 +- backend/internal/service/setting_service.go | 176 +- backend/internal/service/settings_view.go | 54 +- .../internal/service/sticky_session_test.go | 54 + .../service/subscription_expiry_service.go | 71 + .../internal/service/subscription_service.go | 57 +- .../service/token_cache_invalidator.go | 87 +- .../service/token_cache_invalidator_test.go | 287 +- .../internal/service/token_refresh_service.go | 19 +- .../service/token_refresh_service_test.go | 20 +- backend/internal/service/totp_service.go | 506 ++ backend/internal/service/usage_cleanup.go | 74 + .../internal/service/usage_cleanup_service.go | 404 + .../service/usage_cleanup_service_test.go | 818 ++ backend/internal/service/user_service.go | 5 + .../service/user_subscription_port.go | 2 +- backend/internal/service/wire.go | 24 +- backend/internal/setup/cli.go | 3 + backend/internal/setup/handler.go | 18 +- backend/internal/setup/setup.go | 32 +- .../internal/util/urlvalidator/validator.go | 2 +- .../util/urlvalidator/validator_test.go | 27 + .../006_add_users_allowed_groups_compat.sql | 15 + .../006b_guard_users_allowed_groups.sql | 27 + .../042_add_usage_cleanup_tasks.sql | 21 + ...b_add_ops_system_metrics_switch_count.sql} | 0 .../043_add_usage_cleanup_cancel_audit.sql | 10 + ...3b_add_group_invalid_request_fallback.sql} | 2 +- backend/migrations/044_add_user_totp.sql | 12 + ....sql => 044b_add_group_mcp_xml_inject.sql} | 0 backend/migrations/045_add_announcements.sql | 44 + config.yaml | 24 + deploy/.env.example | 13 + deploy/.gitignore | 19 + deploy/README.md | 204 +- build_image.sh => deploy/build_image.sh | 0 deploy/config.example.yaml | 52 + deploy/docker-compose.local.yml | 222 + deploy/docker-compose.standalone.yml | 1 + deploy/docker-compose.yml | 11 + deploy/docker-deploy.sh | 171 + frontend/package-lock.json | 6954 ----------------- frontend/package.json | 5 + frontend/pnpm-lock.yaml | 192 + frontend/src/api/admin/announcements.ts | 71 + frontend/src/api/admin/dashboard.ts | 2 + frontend/src/api/admin/groups.ts | 26 +- frontend/src/api/admin/index.ts | 3 + frontend/src/api/admin/ops.ts | 1 + frontend/src/api/admin/settings.ts | 13 + frontend/src/api/admin/subscriptions.ts | 4 +- frontend/src/api/admin/usage.ts | 88 +- frontend/src/api/admin/users.ts | 27 +- frontend/src/api/announcements.ts | 26 + frontend/src/api/auth.ts | 96 +- frontend/src/api/index.ts | 4 +- frontend/src/api/redeem.ts | 4 +- frontend/src/api/setup.ts | 1 + frontend/src/api/totp.ts | 83 + .../account/AccountStatusIndicator.vue | 60 +- .../components/account/AccountTestModal.vue | 5 +- .../account/BulkEditAccountModal.vue | 4 +- .../components/account/CreateAccountModal.vue | 267 +- .../components/account/EditAccountModal.vue | 92 +- .../admin/account/AccountActionMenu.vue | 88 +- .../admin/account/AccountTableActions.vue | 2 + .../admin/account/AccountTestModal.vue | 5 +- .../AnnouncementReadStatusDialog.vue | 186 + .../AnnouncementTargetingEditor.vue | 408 + .../admin/usage/UsageCleanupDialog.vue | 380 + .../components/admin/usage/UsageFilters.vue | 25 +- .../src/components/admin/usage/UsageTable.vue | 10 +- .../admin/user/UserAllowedGroupsModal.vue | 6 +- .../admin/user/UserApiKeysModal.vue | 6 +- .../admin/user/UserBalanceModal.vue | 4 +- .../components/admin/user/UserEditModal.vue | 4 +- .../src/components/auth/TotpLoginModal.vue | 176 + .../components/common/AnnouncementBell.vue | 626 ++ 
frontend/src/components/common/DataTable.vue | 219 +- .../src/components/common/GroupSelector.vue | 4 +- frontend/src/components/common/Pagination.vue | 38 +- frontend/src/components/common/README.md | 3 + frontend/src/components/icons/Icon.vue | 3 + frontend/src/components/keys/UseKeyModal.vue | 26 +- frontend/src/components/layout/AppHeader.vue | 6 +- frontend/src/components/layout/AppSidebar.vue | 36 + .../user/profile/ProfileTotpCard.vue | 154 + .../user/profile/TotpDisableDialog.vue | 179 + .../user/profile/TotpSetupModal.vue | 400 + frontend/src/composables/useAccountOAuth.ts | 4 + frontend/src/composables/useModelWhitelist.ts | 19 +- frontend/src/i18n/locales/en.ts | 300 +- frontend/src/i18n/locales/zh.ts | 300 +- frontend/src/router/index.ts | 42 + frontend/src/stores/app.ts | 5 + frontend/src/stores/auth.ts | 73 +- frontend/src/types/index.ts | 202 +- frontend/src/utils/format.ts | 64 + frontend/src/views/admin/AccountsView.vue | 322 +- .../src/views/admin/AnnouncementsView.vue | 538 ++ frontend/src/views/admin/GroupsView.vue | 19 +- frontend/src/views/admin/RedeemView.vue | 44 +- frontend/src/views/admin/SettingsView.vue | 129 + .../src/views/admin/SubscriptionsView.vue | 299 +- frontend/src/views/admin/UsageView.vue | 26 +- frontend/src/views/admin/UsersView.vue | 30 +- .../ops/components/OpsSettingsDialog.vue | 10 + .../src/views/auth/ForgotPasswordView.vue | 297 + frontend/src/views/auth/LoginView.vue | 84 +- frontend/src/views/auth/RegisterView.vue | 22 +- frontend/src/views/auth/ResetPasswordView.vue | 355 + frontend/src/views/setup/SetupWizardView.vue | 15 +- frontend/src/views/user/KeysView.vue | 1 + frontend/src/views/user/ProfileView.vue | 2 + .../views/user/PurchaseSubscriptionView.vue | 121 + frontend/src/views/user/RedeemView.vue | 8 + frontend/tsconfig.json | 1 + frontend/vite.config.ts | 53 +- 319 files changed, 40485 insertions(+), 8969 deletions(-) create mode 100644 backend/ent/announcement.go create mode 100644 backend/ent/announcement/announcement.go create mode 100644 backend/ent/announcement/where.go create mode 100644 backend/ent/announcement_create.go create mode 100644 backend/ent/announcement_delete.go create mode 100644 backend/ent/announcement_query.go create mode 100644 backend/ent/announcement_update.go create mode 100644 backend/ent/announcementread.go create mode 100644 backend/ent/announcementread/announcementread.go create mode 100644 backend/ent/announcementread/where.go create mode 100644 backend/ent/announcementread_create.go create mode 100644 backend/ent/announcementread_delete.go create mode 100644 backend/ent/announcementread_query.go create mode 100644 backend/ent/announcementread_update.go create mode 100644 backend/ent/schema/announcement.go create mode 100644 backend/ent/schema/announcement_read.go create mode 100644 backend/ent/schema/usage_cleanup_task.go create mode 100644 backend/ent/usagecleanuptask.go create mode 100644 backend/ent/usagecleanuptask/usagecleanuptask.go create mode 100644 backend/ent/usagecleanuptask/where.go create mode 100644 backend/ent/usagecleanuptask_create.go create mode 100644 backend/ent/usagecleanuptask_delete.go create mode 100644 backend/ent/usagecleanuptask_query.go create mode 100644 backend/ent/usagecleanuptask_update.go create mode 100644 backend/internal/domain/announcement.go create mode 100644 backend/internal/domain/constants.go create mode 100644 backend/internal/handler/admin/admin_basic_handlers_test.go create mode 100644 backend/internal/handler/admin/admin_helpers_test.go create mode 100644 
backend/internal/handler/admin/admin_service_stub_test.go create mode 100644 backend/internal/handler/admin/announcement_handler.go create mode 100644 backend/internal/handler/admin/usage_cleanup_handler_test.go create mode 100644 backend/internal/handler/announcement_handler.go create mode 100644 backend/internal/handler/dto/announcement.go create mode 100644 backend/internal/handler/gemini_cli_session_test.go create mode 100644 backend/internal/handler/totp_handler.go create mode 100644 backend/internal/pkg/tlsfingerprint/dialer.go create mode 100644 backend/internal/pkg/tlsfingerprint/dialer_integration_test.go create mode 100644 backend/internal/pkg/tlsfingerprint/dialer_test.go create mode 100644 backend/internal/pkg/tlsfingerprint/registry.go create mode 100644 backend/internal/pkg/tlsfingerprint/registry_test.go create mode 100644 backend/internal/repository/aes_encryptor.go create mode 100644 backend/internal/repository/announcement_read_repo.go create mode 100644 backend/internal/repository/announcement_repo.go create mode 100644 backend/internal/repository/req_client_pool_test.go create mode 100644 backend/internal/repository/simple_mode_default_groups.go create mode 100644 backend/internal/repository/simple_mode_default_groups_integration_test.go create mode 100644 backend/internal/repository/totp_cache.go create mode 100644 backend/internal/repository/usage_cleanup_repo.go create mode 100644 backend/internal/repository/usage_cleanup_repo_ent_test.go create mode 100644 backend/internal/repository/usage_cleanup_repo_test.go create mode 100644 backend/internal/service/announcement.go create mode 100644 backend/internal/service/announcement_service.go create mode 100644 backend/internal/service/announcement_targeting_test.go create mode 100644 backend/internal/service/gemini_native_signature_cleaner.go create mode 100644 backend/internal/service/ratelimit_service_openai_test.go create mode 100644 backend/internal/service/sticky_session_test.go create mode 100644 backend/internal/service/subscription_expiry_service.go create mode 100644 backend/internal/service/totp_service.go create mode 100644 backend/internal/service/usage_cleanup.go create mode 100644 backend/internal/service/usage_cleanup_service.go create mode 100644 backend/internal/service/usage_cleanup_service_test.go create mode 100644 backend/migrations/006_add_users_allowed_groups_compat.sql create mode 100644 backend/migrations/006b_guard_users_allowed_groups.sql create mode 100644 backend/migrations/042_add_usage_cleanup_tasks.sql rename backend/migrations/{042_add_ops_system_metrics_switch_count.sql => 042b_add_ops_system_metrics_switch_count.sql} (100%) create mode 100644 backend/migrations/043_add_usage_cleanup_cancel_audit.sql rename backend/migrations/{043_add_group_invalid_request_fallback.sql => 043b_add_group_invalid_request_fallback.sql} (92%) create mode 100644 backend/migrations/044_add_user_totp.sql rename backend/migrations/{044_add_group_mcp_xml_inject.sql => 044b_add_group_mcp_xml_inject.sql} (100%) create mode 100644 backend/migrations/045_add_announcements.sql create mode 100644 deploy/.gitignore rename build_image.sh => deploy/build_image.sh (100%) create mode 100644 deploy/docker-compose.local.yml create mode 100644 deploy/docker-deploy.sh delete mode 100644 frontend/package-lock.json create mode 100644 frontend/src/api/admin/announcements.ts create mode 100644 frontend/src/api/announcements.ts create mode 100644 frontend/src/api/totp.ts create mode 100644 
frontend/src/components/admin/announcements/AnnouncementReadStatusDialog.vue create mode 100644 frontend/src/components/admin/announcements/AnnouncementTargetingEditor.vue create mode 100644 frontend/src/components/admin/usage/UsageCleanupDialog.vue create mode 100644 frontend/src/components/auth/TotpLoginModal.vue create mode 100644 frontend/src/components/common/AnnouncementBell.vue create mode 100644 frontend/src/components/user/profile/ProfileTotpCard.vue create mode 100644 frontend/src/components/user/profile/TotpDisableDialog.vue create mode 100644 frontend/src/components/user/profile/TotpSetupModal.vue create mode 100644 frontend/src/views/admin/AnnouncementsView.vue create mode 100644 frontend/src/views/auth/ForgotPasswordView.vue create mode 100644 frontend/src/views/auth/ResetPasswordView.vue create mode 100644 frontend/src/views/user/PurchaseSubscriptionView.vue diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 3ea8860a..e5624f86 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -19,7 +19,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Unit tests working-directory: backend run: make test-unit @@ -38,7 +38,7 @@ jobs: cache: true - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 73ca35d9..f45c1a0b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -115,7 +115,7 @@ jobs: - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' # Docker setup for GoReleaser - name: Set up QEMU @@ -222,8 +222,9 @@ jobs: REPO="${{ github.repository }}" GHCR_IMAGE="ghcr.io/${REPO,,}" # ${,,} converts to lowercase - # 获取 tag message 内容 + # 获取 tag message 内容并转义 Markdown 特殊字符 TAG_MESSAGE='${{ steps.tag_message.outputs.message }}' + TAG_MESSAGE=$(echo "$TAG_MESSAGE" | sed 's/\([_*`\[]\)/\\\1/g') # 限制消息长度(Telegram 消息限制 4096 字符,预留空间给头尾固定内容) if [ ${#TAG_MESSAGE} -gt 3500 ]; then diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 160a0df9..dfb8e37e 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -22,7 +22,7 @@ jobs: cache-dependency-path: backend/go.sum - name: Verify Go version run: | - go version | grep -q 'go1.25.5' + go version | grep -q 'go1.25.6' - name: Run govulncheck working-directory: backend run: | diff --git a/Dockerfile b/Dockerfile index b3320300..3d4b5094 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ # ============================================================================= ARG NODE_IMAGE=node:24-alpine -ARG GOLANG_IMAGE=golang:1.25.5-alpine +ARG GOLANG_IMAGE=golang:1.25.6-alpine ARG ALPINE_IMAGE=alpine:3.20 ARG GOPROXY=https://goproxy.cn,direct ARG GOSUMDB=sum.golang.google.cn diff --git a/README.md b/README.md index fa965e6f..14656332 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ English | [中文](README_CN.md) ## Demo -Try Sub2API online: **https://v2.pincc.ai/** +Try Sub2API online: **https://demo.sub2api.org/** Demo credentials (shared demo environment; **not** created automatically for self-hosted installs): @@ -128,7 +128,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install --- -### Method 2: Docker Compose +### Method 2: 
Docker Compose (Recommended) Deploy with Docker Compose, including PostgreSQL and Redis containers. @@ -137,87 +137,157 @@ Deploy with Docker Compose, including PostgreSQL and Redis containers. - Docker 20.10+ - Docker Compose v2+ -#### Installation Steps +#### Quick Start (One-Click Deployment) + +Use the automated deployment script for easy setup: + +```bash +# Create deployment directory +mkdir -p sub2api-deploy && cd sub2api-deploy + +# Download and run deployment preparation script +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# Start services +docker-compose -f docker-compose.local.yml up -d + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api +``` + +**What the script does:** +- Downloads `docker-compose.local.yml` and `.env.example` +- Generates secure credentials (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +- Creates `.env` file with auto-generated secrets +- Creates data directories (uses local directories for easy backup/migration) +- Displays generated credentials for your reference + +#### Manual Deployment + +If you prefer manual setup: ```bash # 1. Clone the repository git clone https://github.com/Wei-Shaw/sub2api.git -cd sub2api +cd sub2api/deploy -# 2. Enter the deploy directory -cd deploy - -# 3. Copy environment configuration +# 2. Copy environment configuration cp .env.example .env -# 4. Edit configuration (set your passwords) +# 3. Edit configuration (generate secure passwords) nano .env ``` **Required configuration in `.env`:** ```bash -# PostgreSQL password (REQUIRED - change this!) +# PostgreSQL password (REQUIRED) POSTGRES_PASSWORD=your_secure_password_here +# JWT Secret (RECOMMENDED - keeps users logged in after restart) +JWT_SECRET=your_jwt_secret_here + +# TOTP Encryption Key (RECOMMENDED - preserves 2FA after restart) +TOTP_ENCRYPTION_KEY=your_totp_key_here + # Optional: Admin account ADMIN_EMAIL=admin@example.com ADMIN_PASSWORD=your_admin_password # Optional: Custom port SERVER_PORT=8080 +``` -# Optional: Security configuration -# Enable URL allowlist validation (false to skip allowlist checks, only basic format validation) -SECURITY_URL_ALLOWLIST_ENABLED=false +**Generate secure secrets:** +```bash +# Generate JWT_SECRET +openssl rand -hex 32 -# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) -# ⚠️ WARNING: Enabling this allows HTTP (plaintext) URLs which can expose API keys -# Only recommended for: -# - Development/testing environments -# - Internal networks with trusted endpoints -# - When using local test servers (http://localhost) -# PRODUCTION: Keep this false or use HTTPS URLs only -SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false +# Generate TOTP_ENCRYPTION_KEY +openssl rand -hex 32 -# Allow private IP addresses for upstream/pricing/CRS (for internal deployments) -SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +# Generate POSTGRES_PASSWORD +openssl rand -hex 32 ``` ```bash +# 4. Create data directories (for local version) +mkdir -p data postgres_data redis_data + # 5. Start all services +# Option A: Local directory version (recommended - easy migration) +docker-compose -f docker-compose.local.yml up -d + +# Option B: Named volumes version (simple setup) docker-compose up -d # 6. Check status -docker-compose ps +docker-compose -f docker-compose.local.yml ps # 7. 
View logs -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api ``` +#### Deployment Versions + +| Version | Data Storage | Migration | Best For | +|---------|-------------|-----------|----------| +| **docker-compose.local.yml** | Local directories | ✅ Easy (tar entire directory) | Production, frequent backups | +| **docker-compose.yml** | Named volumes | ⚠️ Requires docker commands | Simple setup | + +**Recommendation:** Use `docker-compose.local.yml` (deployed by script) for easier data management. + #### Access Open `http://YOUR_SERVER_IP:8080` in your browser. +If admin password was auto-generated, find it in logs: +```bash +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" +``` + #### Upgrade ```bash # Pull latest image and recreate container -docker-compose pull -docker-compose up -d +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d +``` + +#### Easy Migration (Local Directory Version) + +When using `docker-compose.local.yml`, migrate to a new server easily: + +```bash +# On source server +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz sub2api-deploy/ + +# Transfer to new server +scp sub2api-complete.tar.gz user@new-server:/path/ + +# On new server +tar xzf sub2api-complete.tar.gz +cd sub2api-deploy/ +docker-compose -f docker-compose.local.yml up -d ``` #### Useful Commands ```bash # Stop all services -docker-compose down +docker-compose -f docker-compose.local.yml down # Restart -docker-compose restart +docker-compose -f docker-compose.local.yml restart # View all logs -docker-compose logs -f +docker-compose -f docker-compose.local.yml logs -f + +# Remove all data (caution!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ ``` --- diff --git a/README_CN.md b/README_CN.md index 41d399d5..e609f25d 100644 --- a/README_CN.md +++ b/README_CN.md @@ -135,7 +135,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install --- -### 方式二:Docker Compose +### 方式二:Docker Compose(推荐) 使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。 @@ -144,87 +144,157 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install - Docker 20.10+ - Docker Compose v2+ -#### 安装步骤 +#### 快速开始(一键部署) + +使用自动化部署脚本快速搭建: + +```bash +# 创建部署目录 +mkdir -p sub2api-deploy && cd sub2api-deploy + +# 下载并运行部署准备脚本 +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# 启动服务 +docker-compose -f docker-compose.local.yml up -d + +# 查看日志 +docker-compose -f docker-compose.local.yml logs -f sub2api +``` + +**脚本功能:** +- 下载 `docker-compose.local.yml` 和 `.env.example` +- 自动生成安全凭证(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD) +- 创建 `.env` 文件并填充自动生成的密钥 +- 创建数据目录(使用本地目录,便于备份和迁移) +- 显示生成的凭证供你记录 + +#### 手动部署 + +如果你希望手动配置: ```bash # 1. 克隆仓库 git clone https://github.com/Wei-Shaw/sub2api.git -cd sub2api +cd sub2api/deploy -# 2. 进入 deploy 目录 -cd deploy - -# 3. 复制环境配置文件 +# 2. 复制环境配置文件 cp .env.example .env -# 4. 编辑配置(设置密码等) +# 3. 编辑配置(生成安全密码) nano .env ``` **`.env` 必须配置项:** ```bash -# PostgreSQL 密码(必须修改!) 
+# PostgreSQL 密码(必需) POSTGRES_PASSWORD=your_secure_password_here +# JWT 密钥(推荐 - 重启后保持用户登录状态) +JWT_SECRET=your_jwt_secret_here + +# TOTP 加密密钥(推荐 - 重启后保留双因素认证) +TOTP_ENCRYPTION_KEY=your_totp_key_here + # 可选:管理员账号 ADMIN_EMAIL=admin@example.com ADMIN_PASSWORD=your_admin_password # 可选:自定义端口 SERVER_PORT=8080 +``` -# 可选:安全配置 -# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验) -SECURITY_URL_ALLOWLIST_ENABLED=false +**生成安全密钥:** +```bash +# 生成 JWT_SECRET +openssl rand -hex 32 -# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://) -# ⚠️ 警告:允许 HTTP 会暴露 API 密钥(明文传输) -# 仅建议在以下场景使用: -# - 开发/测试环境 -# - 内部可信网络 -# - 本地测试服务器(http://localhost) -# 生产环境:保持 false 或仅使用 HTTPS URL -SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false +# 生成 TOTP_ENCRYPTION_KEY +openssl rand -hex 32 -# 是否允许私有 IP 地址用于上游/定价/CRS(内网部署时使用) -SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +# 生成 POSTGRES_PASSWORD +openssl rand -hex 32 ``` ```bash +# 4. 创建数据目录(本地版) +mkdir -p data postgres_data redis_data + # 5. 启动所有服务 +# 选项 A:本地目录版(推荐 - 易于迁移) +docker-compose -f docker-compose.local.yml up -d + +# 选项 B:命名卷版(简单设置) docker-compose up -d # 6. 查看状态 -docker-compose ps +docker-compose -f docker-compose.local.yml ps # 7. 查看日志 -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api ``` +#### 部署版本对比 + +| 版本 | 数据存储 | 迁移便利性 | 适用场景 | +|------|---------|-----------|---------| +| **docker-compose.local.yml** | 本地目录 | ✅ 简单(打包整个目录) | 生产环境、频繁备份 | +| **docker-compose.yml** | 命名卷 | ⚠️ 需要 docker 命令 | 简单设置 | + +**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。 + #### 访问 在浏览器中打开 `http://你的服务器IP:8080` +如果管理员密码是自动生成的,在日志中查找: +```bash +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" +``` + #### 升级 ```bash # 拉取最新镜像并重建容器 -docker-compose pull -docker-compose up -d +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d +``` + +#### 轻松迁移(本地目录版) + +使用 `docker-compose.local.yml` 时,可以轻松迁移到新服务器: + +```bash +# 源服务器 +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz sub2api-deploy/ + +# 传输到新服务器 +scp sub2api-complete.tar.gz user@new-server:/path/ + +# 新服务器 +tar xzf sub2api-complete.tar.gz +cd sub2api-deploy/ +docker-compose -f docker-compose.local.yml up -d ``` #### 常用命令 ```bash # 停止所有服务 -docker-compose down +docker-compose -f docker-compose.local.yml down # 重启 -docker-compose restart +docker-compose -f docker-compose.local.yml restart # 查看所有日志 -docker-compose logs -f +docker-compose -f docker-compose.local.yml logs -f + +# 删除所有数据(谨慎!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ ``` --- diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 79e0dd8a..a2d633db 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.46 +0.1.61 diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go index c9dc57bb..f8a7d313 100644 --- a/backend/cmd/server/main.go +++ b/backend/cmd/server/main.go @@ -8,6 +8,7 @@ import ( "errors" "flag" "log" + "log/slog" "net/http" "os" "os/signal" @@ -44,7 +45,25 @@ func init() { } } +// initLogger configures the default slog handler based on gin.Mode(). +// In non-release mode, Debug level logs are enabled. 
+func initLogger() { + var level slog.Level + if gin.Mode() == gin.ReleaseMode { + level = slog.LevelInfo + } else { + level = slog.LevelDebug + } + handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: level, + }) + slog.SetDefault(slog.New(handler)) +} + func main() { + // Initialize slog logger based on gin mode + initLogger() + // Parse command line flags setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode") showVersion := flag.Bool("version", false, "Show version information") diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 0a5f9744..d9ff788e 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -70,6 +70,8 @@ func provideCleanup( schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, + subscriptionExpiry *service.SubscriptionExpiryService, + usageCleanup *service.UsageCleanupService, pricing *service.PricingService, emailQueue *service.EmailQueueService, billingCache *service.BillingCacheService, @@ -123,6 +125,12 @@ func provideCleanup( } return nil }}, + {"UsageCleanupService", func() error { + if usageCleanup != nil { + usageCleanup.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil @@ -131,6 +139,10 @@ func provideCleanup( accountExpiry.Stop() return nil }}, + {"SubscriptionExpiryService", func() error { + subscriptionExpiry.Stop() + return nil + }}, {"PricingService", func() error { pricing.Stop() return nil diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 31e47332..9ccbddc2 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -63,7 +63,13 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator) authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService) userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator) - authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService) + secretEncryptor, err := repository.NewAESEncryptor(configConfig) + if err != nil { + return nil, err + } + totpCache := repository.NewTotpCache(redisClient) + totpService := service.NewTotpService(userRepository, secretEncryptor, totpCache, settingService, emailService, emailQueueService) + authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService, totpService) userHandler := handler.NewUserHandler(userService) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageLogRepository := repository.NewUsageLogRepository(client, db) @@ -75,6 +81,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator) redeemHandler := handler.NewRedeemHandler(redeemService) subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService) + announcementRepository := repository.NewAnnouncementRepository(client) + announcementReadRepository := repository.NewAnnouncementReadRepository(client) + announcementService := service.NewAnnouncementService(announcementRepository, 
announcementReadRepository, userRepository, userSubscriptionRepository) + announcementHandler := handler.NewAnnouncementHandler(announcementService) dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db) dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig) dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig) @@ -84,7 +94,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { } dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig) dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService) - accountRepository := repository.NewAccountRepository(client, db) + schedulerCache := repository.NewSchedulerCache(redisClient) + accountRepository := repository.NewAccountRepository(client, db, schedulerCache) proxyRepository := repository.NewProxyRepository(client, db) proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig) proxyLatencyCache := repository.NewProxyLatencyCache(redisClient) @@ -105,21 +116,23 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { geminiTokenCache := repository.NewGeminiTokenCache(redisClient) compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache) rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, compositeTokenCacheInvalidator) - claudeUsageFetcher := repository.NewClaudeUsageFetcher() + httpUpstream := repository.NewHTTPUpstream(configConfig) + claudeUsageFetcher := repository.NewClaudeUsageFetcher(httpUpstream) antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository) usageCache := service.NewUsageCache() - accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache) + identityCache := repository.NewIdentityCache(redisClient) + accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache) geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService) gatewayCache := repository.NewGatewayCache(redisClient) antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService) - httpUpstream := repository.NewHTTPUpstream(configConfig) antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream, settingService) accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig) concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig) - accountHandler := admin.NewAccountHandler(adminService, oAuthService, 
openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache) + accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, compositeTokenCacheInvalidator) + adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService) oAuthHandler := admin.NewOAuthHandler(oAuthService) openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService) @@ -128,7 +141,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { adminRedeemHandler := admin.NewRedeemHandler(adminService) promoHandler := admin.NewPromoHandler(promoService) opsRepository := repository.NewOpsRepository(db) - schedulerCache := repository.NewSchedulerCache(redisClient) schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db) schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig) pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig) @@ -137,7 +149,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { return nil, err } billingService := service.NewBillingService(configConfig, pricingService) - identityCache := repository.NewIdentityCache(redisClient) identityService := service.NewIdentityService(identityCache) deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService) @@ -154,16 +165,19 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) systemHandler := handler.ProvideSystemHandler(updateService) adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) - adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) + usageCleanupRepository := repository.NewUsageCleanupRepository(client, db) + usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig) + adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService, usageCleanupService) userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client) userAttributeValueRepository := repository.NewUserAttributeValueRepository(client) userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) - gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, 
configConfig) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) + gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, configConfig) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) - handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) + totpHandler := handler.NewTotpHandler(totpService) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler) jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService) adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) @@ -174,9 +188,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) - tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, configConfig) + tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) - v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) application := &Application{ Server: httpServer, Cleanup: v, @@ -209,6 +224,8 @@ func provideCleanup( 
schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, + subscriptionExpiry *service.SubscriptionExpiryService, + usageCleanup *service.UsageCleanupService, pricing *service.PricingService, emailQueue *service.EmailQueueService, billingCache *service.BillingCacheService, @@ -261,6 +278,12 @@ func provideCleanup( } return nil }}, + {"UsageCleanupService", func() error { + if usageCleanup != nil { + usageCleanup.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil @@ -269,6 +292,10 @@ func provideCleanup( accountExpiry.Stop() return nil }}, + {"SubscriptionExpiryService", func() error { + subscriptionExpiry.Stop() + return nil + }}, {"PricingService", func() error { pricing.Stop() return nil diff --git a/backend/ent/announcement.go b/backend/ent/announcement.go new file mode 100644 index 00000000..93d7a375 --- /dev/null +++ b/backend/ent/announcement.go @@ -0,0 +1,249 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/internal/domain" +) + +// Announcement is the model entity for the Announcement schema. +type Announcement struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // 公告标题 + Title string `json:"title,omitempty"` + // 公告内容(支持 Markdown) + Content string `json:"content,omitempty"` + // 状态: draft, active, archived + Status string `json:"status,omitempty"` + // 展示条件(JSON 规则) + Targeting domain.AnnouncementTargeting `json:"targeting,omitempty"` + // 开始展示时间(为空表示立即生效) + StartsAt *time.Time `json:"starts_at,omitempty"` + // 结束展示时间(为空表示永久生效) + EndsAt *time.Time `json:"ends_at,omitempty"` + // 创建人用户ID(管理员) + CreatedBy *int64 `json:"created_by,omitempty"` + // 更新人用户ID(管理员) + UpdatedBy *int64 `json:"updated_by,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AnnouncementQuery when eager-loading is set. + Edges AnnouncementEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AnnouncementEdges holds the relations/edges for other nodes in the graph. +type AnnouncementEdges struct { + // Reads holds the value of the reads edge. + Reads []*AnnouncementRead `json:"reads,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ReadsOrErr returns the Reads value or an error if the edge +// was not loaded in eager-loading. +func (e AnnouncementEdges) ReadsOrErr() ([]*AnnouncementRead, error) { + if e.loadedTypes[0] { + return e.Reads, nil + } + return nil, &NotLoadedError{edge: "reads"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Announcement) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case announcement.FieldTargeting: + values[i] = new([]byte) + case announcement.FieldID, announcement.FieldCreatedBy, announcement.FieldUpdatedBy: + values[i] = new(sql.NullInt64) + case announcement.FieldTitle, announcement.FieldContent, announcement.FieldStatus: + values[i] = new(sql.NullString) + case announcement.FieldStartsAt, announcement.FieldEndsAt, announcement.FieldCreatedAt, announcement.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Announcement fields. +func (_m *Announcement) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case announcement.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case announcement.FieldTitle: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field title", values[i]) + } else if value.Valid { + _m.Title = value.String + } + case announcement.FieldContent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field content", values[i]) + } else if value.Valid { + _m.Content = value.String + } + case announcement.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case announcement.FieldTargeting: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field targeting", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Targeting); err != nil { + return fmt.Errorf("unmarshal field targeting: %w", err) + } + } + case announcement.FieldStartsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field starts_at", values[i]) + } else if value.Valid { + _m.StartsAt = new(time.Time) + *_m.StartsAt = value.Time + } + case announcement.FieldEndsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field ends_at", values[i]) + } else if value.Valid { + _m.EndsAt = new(time.Time) + *_m.EndsAt = value.Time + } + case announcement.FieldCreatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + _m.CreatedBy = new(int64) + *_m.CreatedBy = value.Int64 + } + case announcement.FieldUpdatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field updated_by", values[i]) + } else if value.Valid { + _m.UpdatedBy = new(int64) + *_m.UpdatedBy = value.Int64 + } + case announcement.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case announcement.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field 
updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Announcement. +// This includes values selected through modifiers, order, etc. +func (_m *Announcement) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryReads queries the "reads" edge of the Announcement entity. +func (_m *Announcement) QueryReads() *AnnouncementReadQuery { + return NewAnnouncementClient(_m.config).QueryReads(_m) +} + +// Update returns a builder for updating this Announcement. +// Note that you need to call Announcement.Unwrap() before calling this method if this Announcement +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Announcement) Update() *AnnouncementUpdateOne { + return NewAnnouncementClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Announcement entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Announcement) Unwrap() *Announcement { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Announcement is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Announcement) String() string { + var builder strings.Builder + builder.WriteString("Announcement(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("title=") + builder.WriteString(_m.Title) + builder.WriteString(", ") + builder.WriteString("content=") + builder.WriteString(_m.Content) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("targeting=") + builder.WriteString(fmt.Sprintf("%v", _m.Targeting)) + builder.WriteString(", ") + if v := _m.StartsAt; v != nil { + builder.WriteString("starts_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.EndsAt; v != nil { + builder.WriteString("ends_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.CreatedBy; v != nil { + builder.WriteString("created_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.UpdatedBy; v != nil { + builder.WriteString("updated_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Announcements is a parsable slice of Announcement. +type Announcements []*Announcement diff --git a/backend/ent/announcement/announcement.go b/backend/ent/announcement/announcement.go new file mode 100644 index 00000000..4f34ee05 --- /dev/null +++ b/backend/ent/announcement/announcement.go @@ -0,0 +1,164 @@ +// Code generated by ent, DO NOT EDIT. + +package announcement + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the announcement type in the database. + Label = "announcement" + // FieldID holds the string denoting the id field in the database. 
+ FieldID = "id" + // FieldTitle holds the string denoting the title field in the database. + FieldTitle = "title" + // FieldContent holds the string denoting the content field in the database. + FieldContent = "content" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldTargeting holds the string denoting the targeting field in the database. + FieldTargeting = "targeting" + // FieldStartsAt holds the string denoting the starts_at field in the database. + FieldStartsAt = "starts_at" + // FieldEndsAt holds the string denoting the ends_at field in the database. + FieldEndsAt = "ends_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldUpdatedBy holds the string denoting the updated_by field in the database. + FieldUpdatedBy = "updated_by" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeReads holds the string denoting the reads edge name in mutations. + EdgeReads = "reads" + // Table holds the table name of the announcement in the database. + Table = "announcements" + // ReadsTable is the table that holds the reads relation/edge. + ReadsTable = "announcement_reads" + // ReadsInverseTable is the table name for the AnnouncementRead entity. + // It exists in this package in order to avoid circular dependency with the "announcementread" package. + ReadsInverseTable = "announcement_reads" + // ReadsColumn is the table column denoting the reads relation/edge. + ReadsColumn = "announcement_id" +) + +// Columns holds all SQL columns for announcement fields. +var Columns = []string{ + FieldID, + FieldTitle, + FieldContent, + FieldStatus, + FieldTargeting, + FieldStartsAt, + FieldEndsAt, + FieldCreatedBy, + FieldUpdatedBy, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // TitleValidator is a validator for the "title" field. It is called by the builders before save. + TitleValidator func(string) error + // ContentValidator is a validator for the "content" field. It is called by the builders before save. + ContentValidator func(string) error + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Announcement queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByTitle orders the results by the title field. 
+func ByTitle(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTitle, opts...).ToFunc() +} + +// ByContent orders the results by the content field. +func ByContent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldContent, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByStartsAt orders the results by the starts_at field. +func ByStartsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartsAt, opts...).ToFunc() +} + +// ByEndsAt orders the results by the ends_at field. +func ByEndsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndsAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByUpdatedBy orders the results by the updated_by field. +func ByUpdatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedBy, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByReadsCount orders the results by reads count. +func ByReadsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newReadsStep(), opts...) + } +} + +// ByReads orders the results by reads terms. +func ByReads(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newReadsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newReadsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ReadsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn), + ) +} diff --git a/backend/ent/announcement/where.go b/backend/ent/announcement/where.go new file mode 100644 index 00000000..d3cad2a5 --- /dev/null +++ b/backend/ent/announcement/where.go @@ -0,0 +1,624 @@ +// Code generated by ent, DO NOT EDIT. + +package announcement + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldID, id)) +} + +// Title applies equality check predicate on the "title" field. It's identical to TitleEQ. +func Title(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldTitle, v)) +} + +// Content applies equality check predicate on the "content" field. It's identical to ContentEQ. +func Content(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldContent, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStatus, v)) +} + +// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ. +func StartsAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v)) +} + +// EndsAt applies equality check predicate on the "ends_at" field. It's identical to EndsAtEQ. +func EndsAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v)) +} + +// UpdatedBy applies equality check predicate on the "updated_by" field. It's identical to UpdatedByEQ. +func UpdatedBy(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// TitleEQ applies the EQ predicate on the "title" field. +func TitleEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldTitle, v)) +} + +// TitleNEQ applies the NEQ predicate on the "title" field. +func TitleNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldTitle, v)) +} + +// TitleIn applies the In predicate on the "title" field. +func TitleIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldTitle, vs...)) +} + +// TitleNotIn applies the NotIn predicate on the "title" field. +func TitleNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldTitle, vs...)) +} + +// TitleGT applies the GT predicate on the "title" field. +func TitleGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldTitle, v)) +} + +// TitleGTE applies the GTE predicate on the "title" field. 
+func TitleGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldTitle, v)) +} + +// TitleLT applies the LT predicate on the "title" field. +func TitleLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldTitle, v)) +} + +// TitleLTE applies the LTE predicate on the "title" field. +func TitleLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldTitle, v)) +} + +// TitleContains applies the Contains predicate on the "title" field. +func TitleContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldTitle, v)) +} + +// TitleHasPrefix applies the HasPrefix predicate on the "title" field. +func TitleHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldTitle, v)) +} + +// TitleHasSuffix applies the HasSuffix predicate on the "title" field. +func TitleHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldTitle, v)) +} + +// TitleEqualFold applies the EqualFold predicate on the "title" field. +func TitleEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldTitle, v)) +} + +// TitleContainsFold applies the ContainsFold predicate on the "title" field. +func TitleContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldTitle, v)) +} + +// ContentEQ applies the EQ predicate on the "content" field. +func ContentEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldContent, v)) +} + +// ContentNEQ applies the NEQ predicate on the "content" field. +func ContentNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldContent, v)) +} + +// ContentIn applies the In predicate on the "content" field. +func ContentIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldContent, vs...)) +} + +// ContentNotIn applies the NotIn predicate on the "content" field. +func ContentNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldContent, vs...)) +} + +// ContentGT applies the GT predicate on the "content" field. +func ContentGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldContent, v)) +} + +// ContentGTE applies the GTE predicate on the "content" field. +func ContentGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldContent, v)) +} + +// ContentLT applies the LT predicate on the "content" field. +func ContentLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldContent, v)) +} + +// ContentLTE applies the LTE predicate on the "content" field. +func ContentLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldContent, v)) +} + +// ContentContains applies the Contains predicate on the "content" field. +func ContentContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldContent, v)) +} + +// ContentHasPrefix applies the HasPrefix predicate on the "content" field. +func ContentHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldContent, v)) +} + +// ContentHasSuffix applies the HasSuffix predicate on the "content" field. 
+func ContentHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldContent, v)) +} + +// ContentEqualFold applies the EqualFold predicate on the "content" field. +func ContentEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldContent, v)) +} + +// ContentContainsFold applies the ContainsFold predicate on the "content" field. +func ContentContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldContent, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Announcement { + return predicate.Announcement(sql.FieldContainsFold(FieldStatus, v)) +} + +// TargetingIsNil applies the IsNil predicate on the "targeting" field. +func TargetingIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldTargeting)) +} + +// TargetingNotNil applies the NotNil predicate on the "targeting" field. +func TargetingNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldTargeting)) +} + +// StartsAtEQ applies the EQ predicate on the "starts_at" field. 
+func StartsAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v)) +} + +// StartsAtNEQ applies the NEQ predicate on the "starts_at" field. +func StartsAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldStartsAt, v)) +} + +// StartsAtIn applies the In predicate on the "starts_at" field. +func StartsAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldStartsAt, vs...)) +} + +// StartsAtNotIn applies the NotIn predicate on the "starts_at" field. +func StartsAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldStartsAt, vs...)) +} + +// StartsAtGT applies the GT predicate on the "starts_at" field. +func StartsAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldStartsAt, v)) +} + +// StartsAtGTE applies the GTE predicate on the "starts_at" field. +func StartsAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldStartsAt, v)) +} + +// StartsAtLT applies the LT predicate on the "starts_at" field. +func StartsAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldStartsAt, v)) +} + +// StartsAtLTE applies the LTE predicate on the "starts_at" field. +func StartsAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldStartsAt, v)) +} + +// StartsAtIsNil applies the IsNil predicate on the "starts_at" field. +func StartsAtIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldStartsAt)) +} + +// StartsAtNotNil applies the NotNil predicate on the "starts_at" field. +func StartsAtNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldStartsAt)) +} + +// EndsAtEQ applies the EQ predicate on the "ends_at" field. +func EndsAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v)) +} + +// EndsAtNEQ applies the NEQ predicate on the "ends_at" field. +func EndsAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldEndsAt, v)) +} + +// EndsAtIn applies the In predicate on the "ends_at" field. +func EndsAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldEndsAt, vs...)) +} + +// EndsAtNotIn applies the NotIn predicate on the "ends_at" field. +func EndsAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldEndsAt, vs...)) +} + +// EndsAtGT applies the GT predicate on the "ends_at" field. +func EndsAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldEndsAt, v)) +} + +// EndsAtGTE applies the GTE predicate on the "ends_at" field. +func EndsAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldEndsAt, v)) +} + +// EndsAtLT applies the LT predicate on the "ends_at" field. +func EndsAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldEndsAt, v)) +} + +// EndsAtLTE applies the LTE predicate on the "ends_at" field. +func EndsAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldEndsAt, v)) +} + +// EndsAtIsNil applies the IsNil predicate on the "ends_at" field. 
+func EndsAtIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldEndsAt)) +} + +// EndsAtNotNil applies the NotNil predicate on the "ends_at" field. +func EndsAtNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldEndsAt)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldCreatedBy, v)) +} + +// CreatedByIsNil applies the IsNil predicate on the "created_by" field. +func CreatedByIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldCreatedBy)) +} + +// CreatedByNotNil applies the NotNil predicate on the "created_by" field. +func CreatedByNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldCreatedBy)) +} + +// UpdatedByEQ applies the EQ predicate on the "updated_by" field. +func UpdatedByEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v)) +} + +// UpdatedByNEQ applies the NEQ predicate on the "updated_by" field. +func UpdatedByNEQ(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldUpdatedBy, v)) +} + +// UpdatedByIn applies the In predicate on the "updated_by" field. +func UpdatedByIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByNotIn applies the NotIn predicate on the "updated_by" field. +func UpdatedByNotIn(vs ...int64) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldUpdatedBy, vs...)) +} + +// UpdatedByGT applies the GT predicate on the "updated_by" field. +func UpdatedByGT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldUpdatedBy, v)) +} + +// UpdatedByGTE applies the GTE predicate on the "updated_by" field. +func UpdatedByGTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldUpdatedBy, v)) +} + +// UpdatedByLT applies the LT predicate on the "updated_by" field. 
+func UpdatedByLT(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldUpdatedBy, v)) +} + +// UpdatedByLTE applies the LTE predicate on the "updated_by" field. +func UpdatedByLTE(v int64) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldUpdatedBy, v)) +} + +// UpdatedByIsNil applies the IsNil predicate on the "updated_by" field. +func UpdatedByIsNil() predicate.Announcement { + return predicate.Announcement(sql.FieldIsNull(FieldUpdatedBy)) +} + +// UpdatedByNotNil applies the NotNil predicate on the "updated_by" field. +func UpdatedByNotNil() predicate.Announcement { + return predicate.Announcement(sql.FieldNotNull(FieldUpdatedBy)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Announcement { + return predicate.Announcement(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
+func UpdatedAtLT(v time.Time) predicate.Announcement {
+ return predicate.Announcement(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Announcement {
+ return predicate.Announcement(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// HasReads applies the HasEdge predicate on the "reads" edge.
+func HasReads() predicate.Announcement {
+ return predicate.Announcement(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasReadsWith applies the HasEdge predicate on the "reads" edge with the given conditions (other predicates).
+func HasReadsWith(preds ...predicate.AnnouncementRead) predicate.Announcement {
+ return predicate.Announcement(func(s *sql.Selector) {
+ step := newReadsStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Announcement) predicate.Announcement {
+ return predicate.Announcement(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Announcement) predicate.Announcement {
+ return predicate.Announcement(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Announcement) predicate.Announcement {
+ return predicate.Announcement(sql.NotPredicates(p))
+}
diff --git a/backend/ent/announcement_create.go b/backend/ent/announcement_create.go
new file mode 100644
index 00000000..151d4c11
--- /dev/null
+++ b/backend/ent/announcement_create.go
@@ -0,0 +1,1159 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/Wei-Shaw/sub2api/ent/announcement"
+ "github.com/Wei-Shaw/sub2api/ent/announcementread"
+ "github.com/Wei-Shaw/sub2api/internal/domain"
+)
+
+// AnnouncementCreate is the builder for creating an Announcement entity.
+type AnnouncementCreate struct {
+ config
+ mutation *AnnouncementMutation
+ hooks []Hook
+ conflict []sql.ConflictOption
+}
+
+// SetTitle sets the "title" field.
+func (_c *AnnouncementCreate) SetTitle(v string) *AnnouncementCreate {
+ _c.mutation.SetTitle(v)
+ return _c
+}
+
+// SetContent sets the "content" field.
+func (_c *AnnouncementCreate) SetContent(v string) *AnnouncementCreate {
+ _c.mutation.SetContent(v)
+ return _c
+}
+
+// SetStatus sets the "status" field.
+func (_c *AnnouncementCreate) SetStatus(v string) *AnnouncementCreate {
+ _c.mutation.SetStatus(v)
+ return _c
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_c *AnnouncementCreate) SetNillableStatus(v *string) *AnnouncementCreate {
+ if v != nil {
+ _c.SetStatus(*v)
+ }
+ return _c
+}
+
+// SetTargeting sets the "targeting" field.
+func (_c *AnnouncementCreate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementCreate {
+ _c.mutation.SetTargeting(v)
+ return _c
+}
+
+// SetNillableTargeting sets the "targeting" field if the given value is not nil.
+func (_c *AnnouncementCreate) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementCreate { + if v != nil { + _c.SetTargeting(*v) + } + return _c +} + +// SetStartsAt sets the "starts_at" field. +func (_c *AnnouncementCreate) SetStartsAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetStartsAt(v) + return _c +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableStartsAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetStartsAt(*v) + } + return _c +} + +// SetEndsAt sets the "ends_at" field. +func (_c *AnnouncementCreate) SetEndsAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetEndsAt(v) + return _c +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableEndsAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetEndsAt(*v) + } + return _c +} + +// SetCreatedBy sets the "created_by" field. +func (_c *AnnouncementCreate) SetCreatedBy(v int64) *AnnouncementCreate { + _c.mutation.SetCreatedBy(v) + return _c +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableCreatedBy(v *int64) *AnnouncementCreate { + if v != nil { + _c.SetCreatedBy(*v) + } + return _c +} + +// SetUpdatedBy sets the "updated_by" field. +func (_c *AnnouncementCreate) SetUpdatedBy(v int64) *AnnouncementCreate { + _c.mutation.SetUpdatedBy(v) + return _c +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableUpdatedBy(v *int64) *AnnouncementCreate { + if v != nil { + _c.SetUpdatedBy(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AnnouncementCreate) SetCreatedAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableCreatedAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *AnnouncementCreate) SetUpdatedAt(v time.Time) *AnnouncementCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *AnnouncementCreate) SetNillableUpdatedAt(v *time.Time) *AnnouncementCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_c *AnnouncementCreate) AddReadIDs(ids ...int64) *AnnouncementCreate { + _c.mutation.AddReadIDs(ids...) + return _c +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_c *AnnouncementCreate) AddReads(v ...*AnnouncementRead) *AnnouncementCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_c *AnnouncementCreate) Mutation() *AnnouncementMutation { + return _c.mutation +} + +// Save creates the Announcement in the database. +func (_c *AnnouncementCreate) Save(ctx context.Context) (*Announcement, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (_c *AnnouncementCreate) SaveX(ctx context.Context) *Announcement { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AnnouncementCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AnnouncementCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *AnnouncementCreate) defaults() { + if _, ok := _c.mutation.Status(); !ok { + v := announcement.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := announcement.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := announcement.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *AnnouncementCreate) check() error { + if _, ok := _c.mutation.Title(); !ok { + return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "Announcement.title"`)} + } + if v, ok := _c.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if _, ok := _c.mutation.Content(); !ok { + return &ValidationError{Name: "content", err: errors.New(`ent: missing required field "Announcement.content"`)} + } + if v, ok := _c.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Announcement.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Announcement.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Announcement.updated_at"`)} + } + return nil +} + +func (_c *AnnouncementCreate) sqlSave(ctx context.Context) (*Announcement, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *AnnouncementCreate) createSpec() (*Announcement, *sqlgraph.CreateSpec) { + var ( + _node = &Announcement{config: _c.config} + _spec = sqlgraph.NewCreateSpec(announcement.Table, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + _node.Title = value + } + if value, ok := _c.mutation.Content(); ok { + 
_spec.SetField(announcement.FieldContent, field.TypeString, value)
+ _node.Content = value
+ }
+ if value, ok := _c.mutation.Status(); ok {
+ _spec.SetField(announcement.FieldStatus, field.TypeString, value)
+ _node.Status = value
+ }
+ if value, ok := _c.mutation.Targeting(); ok {
+ _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
+ _node.Targeting = value
+ }
+ if value, ok := _c.mutation.StartsAt(); ok {
+ _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value)
+ _node.StartsAt = &value
+ }
+ if value, ok := _c.mutation.EndsAt(); ok {
+ _spec.SetField(announcement.FieldEndsAt, field.TypeTime, value)
+ _node.EndsAt = &value
+ }
+ if value, ok := _c.mutation.CreatedBy(); ok {
+ _spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value)
+ _node.CreatedBy = &value
+ }
+ if value, ok := _c.mutation.UpdatedBy(); ok {
+ _spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value)
+ _node.UpdatedBy = &value
+ }
+ if value, ok := _c.mutation.CreatedAt(); ok {
+ _spec.SetField(announcement.FieldCreatedAt, field.TypeTime, value)
+ _node.CreatedAt = value
+ }
+ if value, ok := _c.mutation.UpdatedAt(); ok {
+ _spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value)
+ _node.UpdatedAt = value
+ }
+ if nodes := _c.mutation.ReadsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: announcement.ReadsTable,
+ Columns: []string{announcement.ReadsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+// client.Announcement.Create().
+// SetTitle(v).
+// OnConflict(
+// // Update the row with the new values
+// // that were proposed for insertion.
+// sql.ResolveWithNewValues(),
+// ).
+// // Override some of the fields with custom
+// // update values.
+// Update(func(u *ent.AnnouncementUpsert) {
+// u.SetTitle(v + v)
+// }).
+// Exec(ctx)
+func (_c *AnnouncementCreate) OnConflict(opts ...sql.ConflictOption) *AnnouncementUpsertOne {
+ _c.conflict = opts
+ return &AnnouncementUpsertOne{
+ create: _c,
+ }
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+// client.Announcement.Create().
+// OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx)
+func (_c *AnnouncementCreate) OnConflictColumns(columns ...string) *AnnouncementUpsertOne {
+ _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+ return &AnnouncementUpsertOne{
+ create: _c,
+ }
+}
+
+type (
+ // AnnouncementUpsertOne is the builder for "upsert"-ing
+ // one Announcement node.
+ AnnouncementUpsertOne struct {
+ create *AnnouncementCreate
+ }
+
+ // AnnouncementUpsert is the "OnConflict" setter.
+ AnnouncementUpsert struct {
+ *sql.UpdateSet
+ }
+)
+
+// SetTitle sets the "title" field.
+func (u *AnnouncementUpsert) SetTitle(v string) *AnnouncementUpsert {
+ u.Set(announcement.FieldTitle, v)
+ return u
+}
+
+// UpdateTitle sets the "title" field to the value that was provided on create.
+func (u *AnnouncementUpsert) UpdateTitle() *AnnouncementUpsert {
+ u.SetExcluded(announcement.FieldTitle)
+ return u
+}
+
+// SetContent sets the "content" field.
+func (u *AnnouncementUpsert) SetContent(v string) *AnnouncementUpsert { + u.Set(announcement.FieldContent, v) + return u +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateContent() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldContent) + return u +} + +// SetStatus sets the "status" field. +func (u *AnnouncementUpsert) SetStatus(v string) *AnnouncementUpsert { + u.Set(announcement.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateStatus() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldStatus) + return u +} + +// SetTargeting sets the "targeting" field. +func (u *AnnouncementUpsert) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsert { + u.Set(announcement.FieldTargeting, v) + return u +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateTargeting() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldTargeting) + return u +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsert) ClearTargeting() *AnnouncementUpsert { + u.SetNull(announcement.FieldTargeting) + return u +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsert) SetStartsAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldStartsAt, v) + return u +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateStartsAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldStartsAt) + return u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsert) ClearStartsAt() *AnnouncementUpsert { + u.SetNull(announcement.FieldStartsAt) + return u +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsert) SetEndsAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldEndsAt, v) + return u +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateEndsAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldEndsAt) + return u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsert) ClearEndsAt() *AnnouncementUpsert { + u.SetNull(announcement.FieldEndsAt) + return u +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsert) SetCreatedBy(v int64) *AnnouncementUpsert { + u.Set(announcement.FieldCreatedBy, v) + return u +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateCreatedBy() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldCreatedBy) + return u +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *AnnouncementUpsert) AddCreatedBy(v int64) *AnnouncementUpsert { + u.Add(announcement.FieldCreatedBy, v) + return u +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsert) ClearCreatedBy() *AnnouncementUpsert { + u.SetNull(announcement.FieldCreatedBy) + return u +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsert) SetUpdatedBy(v int64) *AnnouncementUpsert { + u.Set(announcement.FieldUpdatedBy, v) + return u +} + +// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create. 
+func (u *AnnouncementUpsert) UpdateUpdatedBy() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldUpdatedBy) + return u +} + +// AddUpdatedBy adds v to the "updated_by" field. +func (u *AnnouncementUpsert) AddUpdatedBy(v int64) *AnnouncementUpsert { + u.Add(announcement.FieldUpdatedBy, v) + return u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (u *AnnouncementUpsert) ClearUpdatedBy() *AnnouncementUpsert { + u.SetNull(announcement.FieldUpdatedBy) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AnnouncementUpsert) SetUpdatedAt(v time.Time) *AnnouncementUpsert { + u.Set(announcement.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AnnouncementUpsert) UpdateUpdatedAt() *AnnouncementUpsert { + u.SetExcluded(announcement.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AnnouncementUpsertOne) UpdateNewValues() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(announcement.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Announcement.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementUpsertOne) Ignore() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementUpsertOne) DoNothing() *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementCreate.OnConflict +// documentation for more info. +func (u *AnnouncementUpsertOne) Update(set func(*AnnouncementUpsert)) *AnnouncementUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementUpsert{UpdateSet: update}) + })) + return u +} + +// SetTitle sets the "title" field. +func (u *AnnouncementUpsertOne) SetTitle(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTitle(v) + }) +} + +// UpdateTitle sets the "title" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateTitle() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTitle() + }) +} + +// SetContent sets the "content" field. +func (u *AnnouncementUpsertOne) SetContent(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateContent() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateContent() + }) +} + +// SetStatus sets the "status" field. 
+func (u *AnnouncementUpsertOne) SetStatus(v string) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateStatus() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStatus() + }) +} + +// SetTargeting sets the "targeting" field. +func (u *AnnouncementUpsertOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTargeting(v) + }) +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateTargeting() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTargeting() + }) +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsertOne) ClearTargeting() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearTargeting() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsertOne) SetStartsAt(v time.Time) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateStartsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStartsAt() + }) +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsertOne) ClearStartsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearStartsAt() + }) +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsertOne) SetEndsAt(v time.Time) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetEndsAt(v) + }) +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateEndsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateEndsAt() + }) +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsertOne) ClearEndsAt() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearEndsAt() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsertOne) SetCreatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *AnnouncementUpsertOne) AddCreatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsertOne) UpdateCreatedBy() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateCreatedBy() + }) +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsertOne) ClearCreatedBy() *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearCreatedBy() + }) +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsertOne) SetUpdatedBy(v int64) *AnnouncementUpsertOne { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedBy(v) + }) +} + +// AddUpdatedBy adds v to the "updated_by" field. 
+func (u *AnnouncementUpsertOne) AddUpdatedBy(v int64) *AnnouncementUpsertOne {
+ return u.Update(func(s *AnnouncementUpsert) {
+ s.AddUpdatedBy(v)
+ })
+}
+
+// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create.
+func (u *AnnouncementUpsertOne) UpdateUpdatedBy() *AnnouncementUpsertOne {
+ return u.Update(func(s *AnnouncementUpsert) {
+ s.UpdateUpdatedBy()
+ })
+}
+
+// ClearUpdatedBy clears the value of the "updated_by" field.
+func (u *AnnouncementUpsertOne) ClearUpdatedBy() *AnnouncementUpsertOne {
+ return u.Update(func(s *AnnouncementUpsert) {
+ s.ClearUpdatedBy()
+ })
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *AnnouncementUpsertOne) SetUpdatedAt(v time.Time) *AnnouncementUpsertOne {
+ return u.Update(func(s *AnnouncementUpsert) {
+ s.SetUpdatedAt(v)
+ })
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *AnnouncementUpsertOne) UpdateUpdatedAt() *AnnouncementUpsertOne {
+ return u.Update(func(s *AnnouncementUpsert) {
+ s.UpdateUpdatedAt()
+ })
+}
+
+// Exec executes the query.
+func (u *AnnouncementUpsertOne) Exec(ctx context.Context) error {
+ if len(u.create.conflict) == 0 {
+ return errors.New("ent: missing options for AnnouncementCreate.OnConflict")
+ }
+ return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AnnouncementUpsertOne) ExecX(ctx context.Context) {
+ if err := u.create.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *AnnouncementUpsertOne) ID(ctx context.Context) (id int64, err error) {
+ node, err := u.create.Save(ctx)
+ if err != nil {
+ return id, err
+ }
+ return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *AnnouncementUpsertOne) IDX(ctx context.Context) int64 {
+ id, err := u.ID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// AnnouncementCreateBulk is the builder for creating many Announcement entities in bulk.
+type AnnouncementCreateBulk struct {
+ config
+ err error
+ builders []*AnnouncementCreate
+ conflict []sql.ConflictOption
+}
+
+// Save creates the Announcement entities in the database.
+func (_c *AnnouncementCreateBulk) Save(ctx context.Context) ([]*Announcement, error) {
+ if _c.err != nil {
+ return nil, _c.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+ nodes := make([]*Announcement, len(_c.builders))
+ mutators := make([]Mutator, len(_c.builders))
+ for i := range _c.builders {
+ func(i int, root context.Context) {
+ builder := _c.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*AnnouncementMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ spec.OnConflict = _c.conflict
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ if specs[i].ID.Value != nil {
+ id := specs[i].ID.Value.(int64)
+ nodes[i].ID = int64(id)
+ }
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *AnnouncementCreateBulk) SaveX(ctx context.Context) []*Announcement {
+ v, err := _c.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (_c *AnnouncementCreateBulk) Exec(ctx context.Context) error {
+ _, err := _c.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *AnnouncementCreateBulk) ExecX(ctx context.Context) {
+ if err := _c.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+// client.Announcement.CreateBulk(builders...).
+// OnConflict(
+// // Update the row with the new values
+// // that were proposed for insertion.
+// sql.ResolveWithNewValues(),
+// ).
+// // Override some of the fields with custom
+// // update values.
+// Update(func(u *ent.AnnouncementUpsert) {
+// u.SetTitle(v + v)
+// }).
+// Exec(ctx)
+func (_c *AnnouncementCreateBulk) OnConflict(opts ...sql.ConflictOption) *AnnouncementUpsertBulk {
+ _c.conflict = opts
+ return &AnnouncementUpsertBulk{
+ create: _c,
+ }
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+// client.Announcement.Create().
+// OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx)
+func (_c *AnnouncementCreateBulk) OnConflictColumns(columns ...string) *AnnouncementUpsertBulk {
+ _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+ return &AnnouncementUpsertBulk{
+ create: _c,
+ }
+}
+
+// AnnouncementUpsertBulk is the builder for "upsert"-ing
+// a bulk of Announcement nodes.
+type AnnouncementUpsertBulk struct {
+ create *AnnouncementCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+// client.Announcement.Create().
+// OnConflict(
+// sql.ResolveWithNewValues(),
+// ).
+// Exec(ctx)
+func (u *AnnouncementUpsertBulk) UpdateNewValues() *AnnouncementUpsertBulk {
+ u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+ u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+ for _, b := range u.create.builders {
+ if _, exists := b.mutation.CreatedAt(); exists {
+ s.SetIgnore(announcement.FieldCreatedAt)
+ }
+ }
+ }))
+ return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+// client.Announcement.Create().
+// OnConflict(sql.ResolveWithIgnore()).
+// Exec(ctx)
+func (u *AnnouncementUpsertBulk) Ignore() *AnnouncementUpsertBulk {
+ u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+ return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementUpsertBulk) DoNothing() *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementCreateBulk.OnConflict +// documentation for more info. +func (u *AnnouncementUpsertBulk) Update(set func(*AnnouncementUpsert)) *AnnouncementUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementUpsert{UpdateSet: update}) + })) + return u +} + +// SetTitle sets the "title" field. +func (u *AnnouncementUpsertBulk) SetTitle(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTitle(v) + }) +} + +// UpdateTitle sets the "title" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateTitle() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTitle() + }) +} + +// SetContent sets the "content" field. +func (u *AnnouncementUpsertBulk) SetContent(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateContent() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateContent() + }) +} + +// SetStatus sets the "status" field. +func (u *AnnouncementUpsertBulk) SetStatus(v string) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateStatus() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStatus() + }) +} + +// SetTargeting sets the "targeting" field. +func (u *AnnouncementUpsertBulk) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetTargeting(v) + }) +} + +// UpdateTargeting sets the "targeting" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateTargeting() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateTargeting() + }) +} + +// ClearTargeting clears the value of the "targeting" field. +func (u *AnnouncementUpsertBulk) ClearTargeting() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearTargeting() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *AnnouncementUpsertBulk) SetStartsAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateStartsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateStartsAt() + }) +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (u *AnnouncementUpsertBulk) ClearStartsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearStartsAt() + }) +} + +// SetEndsAt sets the "ends_at" field. +func (u *AnnouncementUpsertBulk) SetEndsAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetEndsAt(v) + }) +} + +// UpdateEndsAt sets the "ends_at" field to the value that was provided on create. 
+func (u *AnnouncementUpsertBulk) UpdateEndsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateEndsAt() + }) +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (u *AnnouncementUpsertBulk) ClearEndsAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearEndsAt() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *AnnouncementUpsertBulk) SetCreatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *AnnouncementUpsertBulk) AddCreatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateCreatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateCreatedBy() + }) +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (u *AnnouncementUpsertBulk) ClearCreatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearCreatedBy() + }) +} + +// SetUpdatedBy sets the "updated_by" field. +func (u *AnnouncementUpsertBulk) SetUpdatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedBy(v) + }) +} + +// AddUpdatedBy adds v to the "updated_by" field. +func (u *AnnouncementUpsertBulk) AddUpdatedBy(v int64) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.AddUpdatedBy(v) + }) +} + +// UpdateUpdatedBy sets the "updated_by" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateUpdatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateUpdatedBy() + }) +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (u *AnnouncementUpsertBulk) ClearUpdatedBy() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.ClearUpdatedBy() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AnnouncementUpsertBulk) SetUpdatedAt(v time.Time) *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AnnouncementUpsertBulk) UpdateUpdatedAt() *AnnouncementUpsertBulk { + return u.Update(func(s *AnnouncementUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *AnnouncementUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AnnouncementCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AnnouncementCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AnnouncementUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcement_delete.go b/backend/ent/announcement_delete.go new file mode 100644 index 00000000..d185e9f7 --- /dev/null +++ b/backend/ent/announcement_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AnnouncementDelete is the builder for deleting a Announcement entity. +type AnnouncementDelete struct { + config + hooks []Hook + mutation *AnnouncementMutation +} + +// Where appends a list predicates to the AnnouncementDelete builder. +func (_d *AnnouncementDelete) Where(ps ...predicate.Announcement) *AnnouncementDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AnnouncementDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AnnouncementDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(announcement.Table, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AnnouncementDeleteOne is the builder for deleting a single Announcement entity. +type AnnouncementDeleteOne struct { + _d *AnnouncementDelete +} + +// Where appends a list predicates to the AnnouncementDelete builder. +func (_d *AnnouncementDeleteOne) Where(ps ...predicate.Announcement) *AnnouncementDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AnnouncementDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{announcement.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcement_query.go b/backend/ent/announcement_query.go new file mode 100644 index 00000000..a27d50fa --- /dev/null +++ b/backend/ent/announcement_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AnnouncementQuery is the builder for querying Announcement entities. +type AnnouncementQuery struct { + config + ctx *QueryContext + order []announcement.OrderOption + inters []Interceptor + predicates []predicate.Announcement + withReads *AnnouncementReadQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AnnouncementQuery builder. 
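+//
+// Predicates are combined with AND; announcement.Or expresses alternatives.
+// A sketch (the "client" value and the "published" status are assumptions):
+//
+//	client.Announcement.Query().
+//		Where(announcement.Or(
+//			announcement.StatusEQ("published"),
+//			announcement.EndsAtIsNil(),
+//		)).
+//		All(ctx)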
+func (_q *AnnouncementQuery) Where(ps ...predicate.Announcement) *AnnouncementQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AnnouncementQuery) Limit(limit int) *AnnouncementQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AnnouncementQuery) Offset(offset int) *AnnouncementQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AnnouncementQuery) Unique(unique bool) *AnnouncementQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AnnouncementQuery) Order(o ...announcement.OrderOption) *AnnouncementQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryReads chains the current query on the "reads" edge. +func (_q *AnnouncementQuery) QueryReads() *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcement.Table, announcement.FieldID, selector), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, announcement.ReadsTable, announcement.ReadsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Announcement entity from the query. +// Returns a *NotFoundError when no Announcement was found. +func (_q *AnnouncementQuery) First(ctx context.Context) (*Announcement, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{announcement.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AnnouncementQuery) FirstX(ctx context.Context) *Announcement { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Announcement ID from the query. +// Returns a *NotFoundError when no Announcement ID was found. +func (_q *AnnouncementQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{announcement.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AnnouncementQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Announcement entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Announcement entity is found. +// Returns a *NotFoundError when no Announcement entities are found. 
+func (_q *AnnouncementQuery) Only(ctx context.Context) (*Announcement, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{announcement.Label} + default: + return nil, &NotSingularError{announcement.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AnnouncementQuery) OnlyX(ctx context.Context) *Announcement { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Announcement ID in the query. +// Returns a *NotSingularError when more than one Announcement ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *AnnouncementQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{announcement.Label} + default: + err = &NotSingularError{announcement.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AnnouncementQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Announcements. +func (_q *AnnouncementQuery) All(ctx context.Context) ([]*Announcement, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Announcement, *AnnouncementQuery]() + return withInterceptors[[]*Announcement](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AnnouncementQuery) AllX(ctx context.Context) []*Announcement { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Announcement IDs. +func (_q *AnnouncementQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(announcement.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AnnouncementQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AnnouncementQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AnnouncementQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AnnouncementQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *AnnouncementQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AnnouncementQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AnnouncementQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AnnouncementQuery) Clone() *AnnouncementQuery { + if _q == nil { + return nil + } + return &AnnouncementQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]announcement.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Announcement{}, _q.predicates...), + withReads: _q.withReads.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithReads tells the query-builder to eager-load the nodes that are connected to +// the "reads" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AnnouncementQuery) WithReads(opts ...func(*AnnouncementReadQuery)) *AnnouncementQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withReads = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Title string `json:"title,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Announcement.Query(). +// GroupBy(announcement.FieldTitle). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AnnouncementQuery) GroupBy(field string, fields ...string) *AnnouncementGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AnnouncementGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = announcement.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Title string `json:"title,omitempty"` +// } +// +// client.Announcement.Query(). +// Select(announcement.FieldTitle). +// Scan(ctx, &v) +func (_q *AnnouncementQuery) Select(fields ...string) *AnnouncementSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AnnouncementSelect{AnnouncementQuery: _q} + sbuild.label = announcement.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AnnouncementSelect configured with the given aggregations. +func (_q *AnnouncementQuery) Aggregate(fns ...AggregateFunc) *AnnouncementSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *AnnouncementQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !announcement.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AnnouncementQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Announcement, error) { + var ( + nodes = []*Announcement{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withReads != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Announcement).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Announcement{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withReads; query != nil { + if err := _q.loadReads(ctx, query, nodes, + func(n *Announcement) { n.Edges.Reads = []*AnnouncementRead{} }, + func(n *Announcement, e *AnnouncementRead) { n.Edges.Reads = append(n.Edges.Reads, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AnnouncementQuery) loadReads(ctx context.Context, query *AnnouncementReadQuery, nodes []*Announcement, init func(*Announcement), assign func(*Announcement, *AnnouncementRead)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Announcement) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(announcementread.FieldAnnouncementID) + } + query.Where(predicate.AnnouncementRead(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(announcement.ReadsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AnnouncementID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "announcement_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *AnnouncementQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AnnouncementQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, announcement.FieldID) + for i := range fields { + if fields[i] != announcement.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AnnouncementQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(announcement.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = announcement.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *AnnouncementQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *AnnouncementQuery) ForShare(opts ...sql.LockOption) *AnnouncementQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// AnnouncementGroupBy is the group-by builder for Announcement entities. +type AnnouncementGroupBy struct { + selector + build *AnnouncementQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AnnouncementGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *AnnouncementGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementQuery, *AnnouncementGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AnnouncementGroupBy) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AnnouncementSelect is the builder for selecting fields of Announcement entities. +type AnnouncementSelect struct { + *AnnouncementQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AnnouncementSelect) Aggregate(fns ...AggregateFunc) *AnnouncementSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AnnouncementSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementQuery, *AnnouncementSelect](ctx, _s.AnnouncementQuery, _s, _s.inters, v) +} + +func (_s *AnnouncementSelect) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/announcement_update.go b/backend/ent/announcement_update.go new file mode 100644 index 00000000..702d0817 --- /dev/null +++ b/backend/ent/announcement_update.go @@ -0,0 +1,824 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/internal/domain" +) + +// AnnouncementUpdate is the builder for updating Announcement entities. +type AnnouncementUpdate struct { + config + hooks []Hook + mutation *AnnouncementMutation +} + +// Where appends a list predicates to the AnnouncementUpdate builder. +func (_u *AnnouncementUpdate) Where(ps ...predicate.Announcement) *AnnouncementUpdate { + _u.mutation.Where(ps...) 
+ return _u +} + +// SetTitle sets the "title" field. +func (_u *AnnouncementUpdate) SetTitle(v string) *AnnouncementUpdate { + _u.mutation.SetTitle(v) + return _u +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableTitle(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetTitle(*v) + } + return _u +} + +// SetContent sets the "content" field. +func (_u *AnnouncementUpdate) SetContent(v string) *AnnouncementUpdate { + _u.mutation.SetContent(v) + return _u +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableContent(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetContent(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *AnnouncementUpdate) SetStatus(v string) *AnnouncementUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableStatus(v *string) *AnnouncementUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTargeting sets the "targeting" field. +func (_u *AnnouncementUpdate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdate { + _u.mutation.SetTargeting(v) + return _u +} + +// SetNillableTargeting sets the "targeting" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdate { + if v != nil { + _u.SetTargeting(*v) + } + return _u +} + +// ClearTargeting clears the value of the "targeting" field. +func (_u *AnnouncementUpdate) ClearTargeting() *AnnouncementUpdate { + _u.mutation.ClearTargeting() + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *AnnouncementUpdate) SetStartsAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableStartsAt(v *time.Time) *AnnouncementUpdate { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (_u *AnnouncementUpdate) ClearStartsAt() *AnnouncementUpdate { + _u.mutation.ClearStartsAt() + return _u +} + +// SetEndsAt sets the "ends_at" field. +func (_u *AnnouncementUpdate) SetEndsAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetEndsAt(v) + return _u +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableEndsAt(v *time.Time) *AnnouncementUpdate { + if v != nil { + _u.SetEndsAt(*v) + } + return _u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (_u *AnnouncementUpdate) ClearEndsAt() *AnnouncementUpdate { + _u.mutation.ClearEndsAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *AnnouncementUpdate) SetCreatedBy(v int64) *AnnouncementUpdate { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableCreatedBy(v *int64) *AnnouncementUpdate { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *AnnouncementUpdate) AddCreatedBy(v int64) *AnnouncementUpdate { + _u.mutation.AddCreatedBy(v) + return _u +} + +// ClearCreatedBy clears the value of the "created_by" field. 
+func (_u *AnnouncementUpdate) ClearCreatedBy() *AnnouncementUpdate { + _u.mutation.ClearCreatedBy() + return _u +} + +// SetUpdatedBy sets the "updated_by" field. +func (_u *AnnouncementUpdate) SetUpdatedBy(v int64) *AnnouncementUpdate { + _u.mutation.ResetUpdatedBy() + _u.mutation.SetUpdatedBy(v) + return _u +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_u *AnnouncementUpdate) SetNillableUpdatedBy(v *int64) *AnnouncementUpdate { + if v != nil { + _u.SetUpdatedBy(*v) + } + return _u +} + +// AddUpdatedBy adds value to the "updated_by" field. +func (_u *AnnouncementUpdate) AddUpdatedBy(v int64) *AnnouncementUpdate { + _u.mutation.AddUpdatedBy(v) + return _u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (_u *AnnouncementUpdate) ClearUpdatedBy() *AnnouncementUpdate { + _u.mutation.ClearUpdatedBy() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AnnouncementUpdate) SetUpdatedAt(v time.Time) *AnnouncementUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_u *AnnouncementUpdate) AddReadIDs(ids ...int64) *AnnouncementUpdate { + _u.mutation.AddReadIDs(ids...) + return _u +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdate) AddReads(v ...*AnnouncementRead) *AnnouncementUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_u *AnnouncementUpdate) Mutation() *AnnouncementMutation { + return _u.mutation +} + +// ClearReads clears all "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdate) ClearReads() *AnnouncementUpdate { + _u.mutation.ClearReads() + return _u +} + +// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs. +func (_u *AnnouncementUpdate) RemoveReadIDs(ids ...int64) *AnnouncementUpdate { + _u.mutation.RemoveReadIDs(ids...) + return _u +} + +// RemoveReads removes "reads" edges to AnnouncementRead entities. +func (_u *AnnouncementUpdate) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveReadIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AnnouncementUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AnnouncementUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AnnouncementUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := announcement.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
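+//
+// Validation failures surface to callers as *ent.ValidationError values and
+// can be detected with errors.As. A sketch (the "upd" builder is assumed):
+//
+//	if err := upd.Exec(ctx); err != nil {
+//		var ve *ent.ValidationError
+//		if errors.As(err, &ve) {
+//			// ve.Name holds the offending field, e.g. "title"
+//		}
+//	}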
+func (_u *AnnouncementUpdate) check() error { + if v, ok := _u.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if v, ok := _u.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + return nil +} + +func (_u *AnnouncementUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + } + if value, ok := _u.mutation.Content(); ok { + _spec.SetField(announcement.FieldContent, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(announcement.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Targeting(); ok { + _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value) + } + if _u.mutation.TargetingCleared() { + _spec.ClearField(announcement.FieldTargeting, field.TypeJSON) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value) + } + if _u.mutation.StartsAtCleared() { + _spec.ClearField(announcement.FieldStartsAt, field.TypeTime) + } + if value, ok := _u.mutation.EndsAt(); ok { + _spec.SetField(announcement.FieldEndsAt, field.TypeTime, value) + } + if _u.mutation.EndsAtCleared() { + _spec.ClearField(announcement.FieldEndsAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.CreatedByCleared() { + _spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedBy(); ok { + _spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedUpdatedBy(); ok { + _spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if _u.mutation.UpdatedByCleared() { + _spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcement.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AnnouncementUpdateOne is the builder for updating a single Announcement entity. +type AnnouncementUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AnnouncementMutation +} + +// SetTitle sets the "title" field. +func (_u *AnnouncementUpdateOne) SetTitle(v string) *AnnouncementUpdateOne { + _u.mutation.SetTitle(v) + return _u +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableTitle(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetTitle(*v) + } + return _u +} + +// SetContent sets the "content" field. +func (_u *AnnouncementUpdateOne) SetContent(v string) *AnnouncementUpdateOne { + _u.mutation.SetContent(v) + return _u +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableContent(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetContent(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *AnnouncementUpdateOne) SetStatus(v string) *AnnouncementUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableStatus(v *string) *AnnouncementUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTargeting sets the "targeting" field. +func (_u *AnnouncementUpdateOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdateOne { + _u.mutation.SetTargeting(v) + return _u +} + +// SetNillableTargeting sets the "targeting" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdateOne { + if v != nil { + _u.SetTargeting(*v) + } + return _u +} + +// ClearTargeting clears the value of the "targeting" field. +func (_u *AnnouncementUpdateOne) ClearTargeting() *AnnouncementUpdateOne { + _u.mutation.ClearTargeting() + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *AnnouncementUpdateOne) SetStartsAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. 
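+//
+// Handy for optional inputs: a nil pointer leaves the field untouched.
+// A sketch (the "upd" builder and the *time.Time "req.StartsAt" are assumed):
+//
+//	upd.SetNillableStartsAt(req.StartsAt)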
+func (_u *AnnouncementUpdateOne) SetNillableStartsAt(v *time.Time) *AnnouncementUpdateOne { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (_u *AnnouncementUpdateOne) ClearStartsAt() *AnnouncementUpdateOne { + _u.mutation.ClearStartsAt() + return _u +} + +// SetEndsAt sets the "ends_at" field. +func (_u *AnnouncementUpdateOne) SetEndsAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetEndsAt(v) + return _u +} + +// SetNillableEndsAt sets the "ends_at" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableEndsAt(v *time.Time) *AnnouncementUpdateOne { + if v != nil { + _u.SetEndsAt(*v) + } + return _u +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (_u *AnnouncementUpdateOne) ClearEndsAt() *AnnouncementUpdateOne { + _u.mutation.ClearEndsAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *AnnouncementUpdateOne) SetCreatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableCreatedBy(v *int64) *AnnouncementUpdateOne { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *AnnouncementUpdateOne) AddCreatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.AddCreatedBy(v) + return _u +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (_u *AnnouncementUpdateOne) ClearCreatedBy() *AnnouncementUpdateOne { + _u.mutation.ClearCreatedBy() + return _u +} + +// SetUpdatedBy sets the "updated_by" field. +func (_u *AnnouncementUpdateOne) SetUpdatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.ResetUpdatedBy() + _u.mutation.SetUpdatedBy(v) + return _u +} + +// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil. +func (_u *AnnouncementUpdateOne) SetNillableUpdatedBy(v *int64) *AnnouncementUpdateOne { + if v != nil { + _u.SetUpdatedBy(*v) + } + return _u +} + +// AddUpdatedBy adds value to the "updated_by" field. +func (_u *AnnouncementUpdateOne) AddUpdatedBy(v int64) *AnnouncementUpdateOne { + _u.mutation.AddUpdatedBy(v) + return _u +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (_u *AnnouncementUpdateOne) ClearUpdatedBy() *AnnouncementUpdateOne { + _u.mutation.ClearUpdatedBy() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AnnouncementUpdateOne) SetUpdatedAt(v time.Time) *AnnouncementUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs. +func (_u *AnnouncementUpdateOne) AddReadIDs(ids ...int64) *AnnouncementUpdateOne { + _u.mutation.AddReadIDs(ids...) + return _u +} + +// AddReads adds the "reads" edges to the AnnouncementRead entity. +func (_u *AnnouncementUpdateOne) AddReads(v ...*AnnouncementRead) *AnnouncementUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddReadIDs(ids...) +} + +// Mutation returns the AnnouncementMutation object of the builder. +func (_u *AnnouncementUpdateOne) Mutation() *AnnouncementMutation { + return _u.mutation +} + +// ClearReads clears all "reads" edges to the AnnouncementRead entity. 
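+//
+// For example, detaching all read records from one announcement (sketch;
+// the "client" and "id" values are assumed):
+//
+//	_, err := client.Announcement.UpdateOneID(id).
+//		ClearReads().
+//		Save(ctx)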
+func (_u *AnnouncementUpdateOne) ClearReads() *AnnouncementUpdateOne { + _u.mutation.ClearReads() + return _u +} + +// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs. +func (_u *AnnouncementUpdateOne) RemoveReadIDs(ids ...int64) *AnnouncementUpdateOne { + _u.mutation.RemoveReadIDs(ids...) + return _u +} + +// RemoveReads removes "reads" edges to AnnouncementRead entities. +func (_u *AnnouncementUpdateOne) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveReadIDs(ids...) +} + +// Where appends a list predicates to the AnnouncementUpdate builder. +func (_u *AnnouncementUpdateOne) Where(ps ...predicate.Announcement) *AnnouncementUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AnnouncementUpdateOne) Select(field string, fields ...string) *AnnouncementUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Announcement entity. +func (_u *AnnouncementUpdateOne) Save(ctx context.Context) (*Announcement, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementUpdateOne) SaveX(ctx context.Context) *Announcement { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AnnouncementUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AnnouncementUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := announcement.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AnnouncementUpdateOne) check() error { + if v, ok := _u.mutation.Title(); ok { + if err := announcement.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)} + } + } + if v, ok := _u.mutation.Content(); ok { + if err := announcement.ContentValidator(v); err != nil { + return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := announcement.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)} + } + } + return nil +} + +func (_u *AnnouncementUpdateOne) sqlSave(ctx context.Context) (_node *Announcement, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Announcement.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, announcement.FieldID) + for _, f := range fields { + if !announcement.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != announcement.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Title(); ok { + _spec.SetField(announcement.FieldTitle, field.TypeString, value) + } + if value, ok := _u.mutation.Content(); ok { + _spec.SetField(announcement.FieldContent, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(announcement.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Targeting(); ok { + _spec.SetField(announcement.FieldTargeting, field.TypeJSON, value) + } + if _u.mutation.TargetingCleared() { + _spec.ClearField(announcement.FieldTargeting, field.TypeJSON) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(announcement.FieldStartsAt, field.TypeTime, value) + } + if _u.mutation.StartsAtCleared() { + _spec.ClearField(announcement.FieldStartsAt, field.TypeTime) + } + if value, ok := _u.mutation.EndsAt(); ok { + _spec.SetField(announcement.FieldEndsAt, field.TypeTime, value) + } + if _u.mutation.EndsAtCleared() { + _spec.ClearField(announcement.FieldEndsAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.CreatedByCleared() { + _spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64) + } + if value, ok := _u.mutation.UpdatedBy(); ok { + _spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedUpdatedBy(); ok { + _spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value) + } + if _u.mutation.UpdatedByCleared() { + _spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64) + } + if 
value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: announcement.ReadsTable, + Columns: []string{announcement.ReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Announcement{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcement.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/announcementread.go b/backend/ent/announcementread.go new file mode 100644 index 00000000..7bba04f2 --- /dev/null +++ b/backend/ent/announcementread.go @@ -0,0 +1,185 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementRead is the model entity for the AnnouncementRead schema. +type AnnouncementRead struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // AnnouncementID holds the value of the "announcement_id" field. + AnnouncementID int64 `json:"announcement_id,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // 用户首次已读时间 + ReadAt time.Time `json:"read_at,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AnnouncementReadQuery when eager-loading is set. + Edges AnnouncementReadEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AnnouncementReadEdges holds the relations/edges for other nodes in the graph. +type AnnouncementReadEdges struct { + // Announcement holds the value of the announcement edge. + Announcement *Announcement `json:"announcement,omitempty"` + // User holds the value of the user edge. 
+ User *User `json:"user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// AnnouncementOrErr returns the Announcement value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AnnouncementReadEdges) AnnouncementOrErr() (*Announcement, error) { + if e.Announcement != nil { + return e.Announcement, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: announcement.Label} + } + return nil, &NotLoadedError{edge: "announcement"} +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AnnouncementReadEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AnnouncementRead) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case announcementread.FieldID, announcementread.FieldAnnouncementID, announcementread.FieldUserID: + values[i] = new(sql.NullInt64) + case announcementread.FieldReadAt, announcementread.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AnnouncementRead fields. +func (_m *AnnouncementRead) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case announcementread.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case announcementread.FieldAnnouncementID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field announcement_id", values[i]) + } else if value.Valid { + _m.AnnouncementID = value.Int64 + } + case announcementread.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case announcementread.FieldReadAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field read_at", values[i]) + } else if value.Valid { + _m.ReadAt = value.Time + } + case announcementread.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AnnouncementRead. +// This includes values selected through modifiers, order, etc. +func (_m *AnnouncementRead) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAnnouncement queries the "announcement" edge of the AnnouncementRead entity. 
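+//
+// For example, walking from a read record back to its announcement (sketch;
+// "readRec" is an assumed *ent.AnnouncementRead value):
+//
+//	a, err := readRec.QueryAnnouncement().Only(ctx)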
+func (_m *AnnouncementRead) QueryAnnouncement() *AnnouncementQuery { + return NewAnnouncementReadClient(_m.config).QueryAnnouncement(_m) +} + +// QueryUser queries the "user" edge of the AnnouncementRead entity. +func (_m *AnnouncementRead) QueryUser() *UserQuery { + return NewAnnouncementReadClient(_m.config).QueryUser(_m) +} + +// Update returns a builder for updating this AnnouncementRead. +// Note that you need to call AnnouncementRead.Unwrap() before calling this method if this AnnouncementRead +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *AnnouncementRead) Update() *AnnouncementReadUpdateOne { + return NewAnnouncementReadClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the AnnouncementRead entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *AnnouncementRead) Unwrap() *AnnouncementRead { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: AnnouncementRead is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *AnnouncementRead) String() string { + var builder strings.Builder + builder.WriteString("AnnouncementRead(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("announcement_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AnnouncementID)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("read_at=") + builder.WriteString(_m.ReadAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// AnnouncementReads is a parsable slice of AnnouncementRead. +type AnnouncementReads []*AnnouncementRead diff --git a/backend/ent/announcementread/announcementread.go b/backend/ent/announcementread/announcementread.go new file mode 100644 index 00000000..cf5fe458 --- /dev/null +++ b/backend/ent/announcementread/announcementread.go @@ -0,0 +1,127 @@ +// Code generated by ent, DO NOT EDIT. + +package announcementread + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the announcementread type in the database. + Label = "announcement_read" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldAnnouncementID holds the string denoting the announcement_id field in the database. + FieldAnnouncementID = "announcement_id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldReadAt holds the string denoting the read_at field in the database. + FieldReadAt = "read_at" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeAnnouncement holds the string denoting the announcement edge name in mutations. + EdgeAnnouncement = "announcement" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // Table holds the table name of the announcementread in the database. + Table = "announcement_reads" + // AnnouncementTable is the table that holds the announcement relation/edge. 
+ AnnouncementTable = "announcement_reads" + // AnnouncementInverseTable is the table name for the Announcement entity. + // It exists in this package in order to avoid circular dependency with the "announcement" package. + AnnouncementInverseTable = "announcements" + // AnnouncementColumn is the table column denoting the announcement relation/edge. + AnnouncementColumn = "announcement_id" + // UserTable is the table that holds the user relation/edge. + UserTable = "announcement_reads" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" +) + +// Columns holds all SQL columns for announcementread fields. +var Columns = []string{ + FieldID, + FieldAnnouncementID, + FieldUserID, + FieldReadAt, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultReadAt holds the default value on creation for the "read_at" field. + DefaultReadAt func() time.Time + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AnnouncementRead queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByAnnouncementID orders the results by the announcement_id field. +func ByAnnouncementID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAnnouncementID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByReadAt orders the results by the read_at field. +func ByReadAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReadAt, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByAnnouncementField orders the results by announcement field. +func ByAnnouncementField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnnouncementStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUserField orders the results by user field. 
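+//
+// For example, ordering read records by the related user's email (sketch;
+// assumes the User schema defines an "email" field):
+//
+//	client.AnnouncementRead.Query().
+//		Order(announcementread.ByUserField(user.FieldEmail)).
+//		All(ctx)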
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} +func newAnnouncementStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnnouncementInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn), + ) +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} diff --git a/backend/ent/announcementread/where.go b/backend/ent/announcementread/where.go new file mode 100644 index 00000000..1a4305e8 --- /dev/null +++ b/backend/ent/announcementread/where.go @@ -0,0 +1,257 @@ +// Code generated by ent, DO NOT EDIT. + +package announcementread + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLTE(FieldID, id)) +} + +// AnnouncementID applies equality check predicate on the "announcement_id" field. It's identical to AnnouncementIDEQ. +func AnnouncementID(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v)) +} + +// ReadAt applies equality check predicate on the "read_at" field. It's identical to ReadAtEQ. +func ReadAt(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
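+// For example (an illustrative sketch; client, ctx and the time value t are
+// assumed to exist):
+//
+//	rows, err := client.AnnouncementRead.Query().
+//		Where(announcementread.CreatedAt(t)).
+//		All(ctx)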
+func CreatedAt(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v)) +} + +// AnnouncementIDEQ applies the EQ predicate on the "announcement_id" field. +func AnnouncementIDEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v)) +} + +// AnnouncementIDNEQ applies the NEQ predicate on the "announcement_id" field. +func AnnouncementIDNEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldAnnouncementID, v)) +} + +// AnnouncementIDIn applies the In predicate on the "announcement_id" field. +func AnnouncementIDIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldAnnouncementID, vs...)) +} + +// AnnouncementIDNotIn applies the NotIn predicate on the "announcement_id" field. +func AnnouncementIDNotIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldAnnouncementID, vs...)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldUserID, vs...)) +} + +// ReadAtEQ applies the EQ predicate on the "read_at" field. +func ReadAtEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v)) +} + +// ReadAtNEQ applies the NEQ predicate on the "read_at" field. +func ReadAtNEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldReadAt, v)) +} + +// ReadAtIn applies the In predicate on the "read_at" field. +func ReadAtIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldReadAt, vs...)) +} + +// ReadAtNotIn applies the NotIn predicate on the "read_at" field. +func ReadAtNotIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldReadAt, vs...)) +} + +// ReadAtGT applies the GT predicate on the "read_at" field. +func ReadAtGT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGT(FieldReadAt, v)) +} + +// ReadAtGTE applies the GTE predicate on the "read_at" field. +func ReadAtGTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGTE(FieldReadAt, v)) +} + +// ReadAtLT applies the LT predicate on the "read_at" field. +func ReadAtLT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLT(FieldReadAt, v)) +} + +// ReadAtLTE applies the LTE predicate on the "read_at" field. +func ReadAtLTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLTE(FieldReadAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
+func CreatedAtEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasAnnouncement applies the HasEdge predicate on the "announcement" edge. +func HasAnnouncement() predicate.AnnouncementRead { + return predicate.AnnouncementRead(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAnnouncementWith applies the HasEdge predicate on the "announcement" edge with a given conditions (other predicates). +func HasAnnouncementWith(preds ...predicate.Announcement) predicate.AnnouncementRead { + return predicate.AnnouncementRead(func(s *sql.Selector) { + step := newAnnouncementStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.AnnouncementRead { + return predicate.AnnouncementRead(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.AnnouncementRead { + return predicate.AnnouncementRead(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. 
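+// For example (an illustrative sketch; client, ctx and the time value since
+// are assumed to exist):
+//
+//	rows, err := client.AnnouncementRead.Query().
+//		Where(announcementread.Or(
+//			announcementread.UserID(1),
+//			announcementread.ReadAtGT(since),
+//		)).
+//		All(ctx)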
+func Or(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AnnouncementRead) predicate.AnnouncementRead { + return predicate.AnnouncementRead(sql.NotPredicates(p)) +} diff --git a/backend/ent/announcementread_create.go b/backend/ent/announcementread_create.go new file mode 100644 index 00000000..c8c211ff --- /dev/null +++ b/backend/ent/announcementread_create.go @@ -0,0 +1,660 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementReadCreate is the builder for creating a AnnouncementRead entity. +type AnnouncementReadCreate struct { + config + mutation *AnnouncementReadMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetAnnouncementID sets the "announcement_id" field. +func (_c *AnnouncementReadCreate) SetAnnouncementID(v int64) *AnnouncementReadCreate { + _c.mutation.SetAnnouncementID(v) + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *AnnouncementReadCreate) SetUserID(v int64) *AnnouncementReadCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetReadAt sets the "read_at" field. +func (_c *AnnouncementReadCreate) SetReadAt(v time.Time) *AnnouncementReadCreate { + _c.mutation.SetReadAt(v) + return _c +} + +// SetNillableReadAt sets the "read_at" field if the given value is not nil. +func (_c *AnnouncementReadCreate) SetNillableReadAt(v *time.Time) *AnnouncementReadCreate { + if v != nil { + _c.SetReadAt(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AnnouncementReadCreate) SetCreatedAt(v time.Time) *AnnouncementReadCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AnnouncementReadCreate) SetNillableCreatedAt(v *time.Time) *AnnouncementReadCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetAnnouncement sets the "announcement" edge to the Announcement entity. +func (_c *AnnouncementReadCreate) SetAnnouncement(v *Announcement) *AnnouncementReadCreate { + return _c.SetAnnouncementID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_c *AnnouncementReadCreate) SetUser(v *User) *AnnouncementReadCreate { + return _c.SetUserID(v.ID) +} + +// Mutation returns the AnnouncementReadMutation object of the builder. +func (_c *AnnouncementReadCreate) Mutation() *AnnouncementReadMutation { + return _c.mutation +} + +// Save creates the AnnouncementRead in the database. +func (_c *AnnouncementReadCreate) Save(ctx context.Context) (*AnnouncementRead, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AnnouncementReadCreate) SaveX(ctx context.Context) *AnnouncementRead { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AnnouncementReadCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_c *AnnouncementReadCreate) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (_c *AnnouncementReadCreate) defaults() {
+	if _, ok := _c.mutation.ReadAt(); !ok {
+		v := announcementread.DefaultReadAt()
+		_c.mutation.SetReadAt(v)
+	}
+	if _, ok := _c.mutation.CreatedAt(); !ok {
+		v := announcementread.DefaultCreatedAt()
+		_c.mutation.SetCreatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_c *AnnouncementReadCreate) check() error {
+	if _, ok := _c.mutation.AnnouncementID(); !ok {
+		return &ValidationError{Name: "announcement_id", err: errors.New(`ent: missing required field "AnnouncementRead.announcement_id"`)}
+	}
+	if _, ok := _c.mutation.UserID(); !ok {
+		return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "AnnouncementRead.user_id"`)}
+	}
+	if _, ok := _c.mutation.ReadAt(); !ok {
+		return &ValidationError{Name: "read_at", err: errors.New(`ent: missing required field "AnnouncementRead.read_at"`)}
+	}
+	if _, ok := _c.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AnnouncementRead.created_at"`)}
+	}
+	if len(_c.mutation.AnnouncementIDs()) == 0 {
+		return &ValidationError{Name: "announcement", err: errors.New(`ent: missing required edge "AnnouncementRead.announcement"`)}
+	}
+	if len(_c.mutation.UserIDs()) == 0 {
+		return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "AnnouncementRead.user"`)}
+	}
+	return nil
+}
+
+func (_c *AnnouncementReadCreate) sqlSave(ctx context.Context) (*AnnouncementRead, error) {
+	if err := _c.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := _c.createSpec()
+	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int64(id)
+	_c.mutation.id = &_node.ID
+	_c.mutation.done = true
+	return _node, nil
+}
+
+func (_c *AnnouncementReadCreate) createSpec() (*AnnouncementRead, *sqlgraph.CreateSpec) {
+	var (
+		_node = &AnnouncementRead{config: _c.config}
+		_spec = sqlgraph.NewCreateSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
+	)
+	_spec.OnConflict = _c.conflict
+	if value, ok := _c.mutation.ReadAt(); ok {
+		_spec.SetField(announcementread.FieldReadAt, field.TypeTime, value)
+		_node.ReadAt = value
+	}
+	if value, ok := _c.mutation.CreatedAt(); ok {
+		_spec.SetField(announcementread.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if nodes := _c.mutation.AnnouncementIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   announcementread.AnnouncementTable,
+			Columns: []string{announcementread.AnnouncementColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.AnnouncementID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   announcementread.UserTable,
+			Columns: []string{announcementread.UserColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.UserID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.AnnouncementRead.Create().
+//		SetAnnouncementID(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// the was proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AnnouncementReadUpsert) {
+//			SetAnnouncementID(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *AnnouncementReadCreate) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertOne {
+	_c.conflict = opts
+	return &AnnouncementReadUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.AnnouncementRead.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *AnnouncementReadCreate) OnConflictColumns(columns ...string) *AnnouncementReadUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &AnnouncementReadUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// AnnouncementReadUpsertOne is the builder for "upsert"-ing
+	//  one AnnouncementRead node.
+	AnnouncementReadUpsertOne struct {
+		create *AnnouncementReadCreate
+	}
+
+	// AnnouncementReadUpsert is the "OnConflict" setter.
+	AnnouncementReadUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetAnnouncementID sets the "announcement_id" field.
+func (u *AnnouncementReadUpsert) SetAnnouncementID(v int64) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldAnnouncementID, v)
+	return u
+}
+
+// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateAnnouncementID() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldAnnouncementID)
+	return u
+}
+
+// SetUserID sets the "user_id" field.
+func (u *AnnouncementReadUpsert) SetUserID(v int64) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldUserID, v)
+	return u
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateUserID() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldUserID)
+	return u
+}
+
+// SetReadAt sets the "read_at" field.
+func (u *AnnouncementReadUpsert) SetReadAt(v time.Time) *AnnouncementReadUpsert {
+	u.Set(announcementread.FieldReadAt, v)
+	return u
+}
+
+// UpdateReadAt sets the "read_at" field to the value that was provided on create.
+func (u *AnnouncementReadUpsert) UpdateReadAt() *AnnouncementReadUpsert {
+	u.SetExcluded(announcementread.FieldReadAt)
+	return u
+}
+
+// UpdateNewValues updates the mutable fields using the new values that were set on create.
+// Using this option is equivalent to using:
+//
+//	client.AnnouncementRead.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+// Exec(ctx) +func (u *AnnouncementReadUpsertOne) UpdateNewValues() *AnnouncementReadUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(announcementread.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementReadUpsertOne) Ignore() *AnnouncementReadUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementReadUpsertOne) DoNothing() *AnnouncementReadUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreate.OnConflict +// documentation for more info. +func (u *AnnouncementReadUpsertOne) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementReadUpsert{UpdateSet: update}) + })) + return u +} + +// SetAnnouncementID sets the "announcement_id" field. +func (u *AnnouncementReadUpsertOne) SetAnnouncementID(v int64) *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetAnnouncementID(v) + }) +} + +// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create. +func (u *AnnouncementReadUpsertOne) UpdateAnnouncementID() *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateAnnouncementID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *AnnouncementReadUpsertOne) SetUserID(v int64) *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *AnnouncementReadUpsertOne) UpdateUserID() *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateUserID() + }) +} + +// SetReadAt sets the "read_at" field. +func (u *AnnouncementReadUpsertOne) SetReadAt(v time.Time) *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetReadAt(v) + }) +} + +// UpdateReadAt sets the "read_at" field to the value that was provided on create. +func (u *AnnouncementReadUpsertOne) UpdateReadAt() *AnnouncementReadUpsertOne { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateReadAt() + }) +} + +// Exec executes the query. +func (u *AnnouncementReadUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AnnouncementReadCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AnnouncementReadUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
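+// An illustrative sketch (client/ctx are assumed, and the conflict columns
+// presume a unique (announcement_id, user_id) index, which is an assumption
+// here, not something this file guarantees):
+//
+//	id, err := client.AnnouncementRead.Create().
+//		SetAnnouncementID(annID).
+//		SetUserID(userID).
+//		OnConflictColumns(
+//			announcementread.FieldAnnouncementID,
+//			announcementread.FieldUserID,
+//		).
+//		UpdateNewValues().
+//		ID(ctx)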
+func (u *AnnouncementReadUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *AnnouncementReadUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// AnnouncementReadCreateBulk is the builder for creating many AnnouncementRead entities in bulk. +type AnnouncementReadCreateBulk struct { + config + err error + builders []*AnnouncementReadCreate + conflict []sql.ConflictOption +} + +// Save creates the AnnouncementRead entities in the database. +func (_c *AnnouncementReadCreateBulk) Save(ctx context.Context) ([]*AnnouncementRead, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*AnnouncementRead, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AnnouncementReadMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *AnnouncementReadCreateBulk) SaveX(ctx context.Context) []*AnnouncementRead { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AnnouncementReadCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AnnouncementReadCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.AnnouncementRead.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AnnouncementReadUpsert) { +// SetAnnouncementID(v+v). +// }). 
+// Exec(ctx) +func (_c *AnnouncementReadCreateBulk) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertBulk { + _c.conflict = opts + return &AnnouncementReadUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AnnouncementReadCreateBulk) OnConflictColumns(columns ...string) *AnnouncementReadUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AnnouncementReadUpsertBulk{ + create: _c, + } +} + +// AnnouncementReadUpsertBulk is the builder for "upsert"-ing +// a bulk of AnnouncementRead nodes. +type AnnouncementReadUpsertBulk struct { + create *AnnouncementReadCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AnnouncementReadUpsertBulk) UpdateNewValues() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(announcementread.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AnnouncementRead.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AnnouncementReadUpsertBulk) Ignore() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AnnouncementReadUpsertBulk) DoNothing() *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreateBulk.OnConflict +// documentation for more info. +func (u *AnnouncementReadUpsertBulk) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AnnouncementReadUpsert{UpdateSet: update}) + })) + return u +} + +// SetAnnouncementID sets the "announcement_id" field. +func (u *AnnouncementReadUpsertBulk) SetAnnouncementID(v int64) *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetAnnouncementID(v) + }) +} + +// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create. +func (u *AnnouncementReadUpsertBulk) UpdateAnnouncementID() *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateAnnouncementID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *AnnouncementReadUpsertBulk) SetUserID(v int64) *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. 
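+// An illustrative bulk sketch (client/ctx, the builders slice, and a unique
+// (announcement_id, user_id) index are all assumed):
+//
+//	err := client.AnnouncementRead.CreateBulk(builders...).
+//		OnConflictColumns(
+//			announcementread.FieldAnnouncementID,
+//			announcementread.FieldUserID,
+//		).
+//		UpdateNewValues().
+//		Exec(ctx)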
+func (u *AnnouncementReadUpsertBulk) UpdateUserID() *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateUserID() + }) +} + +// SetReadAt sets the "read_at" field. +func (u *AnnouncementReadUpsertBulk) SetReadAt(v time.Time) *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.SetReadAt(v) + }) +} + +// UpdateReadAt sets the "read_at" field to the value that was provided on create. +func (u *AnnouncementReadUpsertBulk) UpdateReadAt() *AnnouncementReadUpsertBulk { + return u.Update(func(s *AnnouncementReadUpsert) { + s.UpdateReadAt() + }) +} + +// Exec executes the query. +func (u *AnnouncementReadUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AnnouncementReadCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AnnouncementReadCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AnnouncementReadUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcementread_delete.go b/backend/ent/announcementread_delete.go new file mode 100644 index 00000000..a4da0821 --- /dev/null +++ b/backend/ent/announcementread_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AnnouncementReadDelete is the builder for deleting a AnnouncementRead entity. +type AnnouncementReadDelete struct { + config + hooks []Hook + mutation *AnnouncementReadMutation +} + +// Where appends a list predicates to the AnnouncementReadDelete builder. +func (_d *AnnouncementReadDelete) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AnnouncementReadDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementReadDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AnnouncementReadDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AnnouncementReadDeleteOne is the builder for deleting a single AnnouncementRead entity. +type AnnouncementReadDeleteOne struct { + _d *AnnouncementReadDelete +} + +// Where appends a list predicates to the AnnouncementReadDelete builder. 
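+// An illustrative sketch (client/ctx assumed): deleting all read-marks of one
+// user via the batch delete builder looks like:
+//
+//	n, err := client.AnnouncementRead.Delete().
+//		Where(announcementread.UserID(userID)).
+//		Exec(ctx)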
+func (_d *AnnouncementReadDeleteOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AnnouncementReadDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{announcementread.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AnnouncementReadDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/announcementread_query.go b/backend/ent/announcementread_query.go new file mode 100644 index 00000000..108299fd --- /dev/null +++ b/backend/ent/announcementread_query.go @@ -0,0 +1,718 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementReadQuery is the builder for querying AnnouncementRead entities. +type AnnouncementReadQuery struct { + config + ctx *QueryContext + order []announcementread.OrderOption + inters []Interceptor + predicates []predicate.AnnouncementRead + withAnnouncement *AnnouncementQuery + withUser *UserQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AnnouncementReadQuery builder. +func (_q *AnnouncementReadQuery) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AnnouncementReadQuery) Limit(limit int) *AnnouncementReadQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AnnouncementReadQuery) Offset(offset int) *AnnouncementReadQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AnnouncementReadQuery) Unique(unique bool) *AnnouncementReadQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AnnouncementReadQuery) Order(o ...announcementread.OrderOption) *AnnouncementReadQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAnnouncement chains the current query on the "announcement" edge. 
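+// For example (an illustrative sketch; client/ctx assumed), loading the
+// announcements a user has read:
+//
+//	anns, err := client.AnnouncementRead.Query().
+//		Where(announcementread.UserID(userID)).
+//		QueryAnnouncement().
+//		All(ctx)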
+func (_q *AnnouncementReadQuery) QueryAnnouncement() *AnnouncementQuery { + query := (&AnnouncementClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, selector), + sqlgraph.To(announcement.Table, announcement.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.AnnouncementTable, announcementread.AnnouncementColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUser chains the current query on the "user" edge. +func (_q *AnnouncementReadQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.UserTable, announcementread.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AnnouncementRead entity from the query. +// Returns a *NotFoundError when no AnnouncementRead was found. +func (_q *AnnouncementReadQuery) First(ctx context.Context) (*AnnouncementRead, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{announcementread.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AnnouncementReadQuery) FirstX(ctx context.Context) *AnnouncementRead { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AnnouncementRead ID from the query. +// Returns a *NotFoundError when no AnnouncementRead ID was found. +func (_q *AnnouncementReadQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{announcementread.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AnnouncementReadQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AnnouncementRead entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AnnouncementRead entity is found. +// Returns a *NotFoundError when no AnnouncementRead entities are found. 
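+// For example (an illustrative sketch; client/ctx assumed):
+//
+//	ar, err := client.AnnouncementRead.Query().
+//		Where(
+//			announcementread.AnnouncementID(annID),
+//			announcementread.UserID(userID),
+//		).
+//		Only(ctx)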
+func (_q *AnnouncementReadQuery) Only(ctx context.Context) (*AnnouncementRead, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{announcementread.Label} + default: + return nil, &NotSingularError{announcementread.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AnnouncementReadQuery) OnlyX(ctx context.Context) *AnnouncementRead { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AnnouncementRead ID in the query. +// Returns a *NotSingularError when more than one AnnouncementRead ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *AnnouncementReadQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{announcementread.Label} + default: + err = &NotSingularError{announcementread.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AnnouncementReadQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AnnouncementReads. +func (_q *AnnouncementReadQuery) All(ctx context.Context) ([]*AnnouncementRead, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AnnouncementRead, *AnnouncementReadQuery]() + return withInterceptors[[]*AnnouncementRead](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AnnouncementReadQuery) AllX(ctx context.Context) []*AnnouncementRead { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AnnouncementRead IDs. +func (_q *AnnouncementReadQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(announcementread.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AnnouncementReadQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AnnouncementReadQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AnnouncementReadQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AnnouncementReadQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
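+// For example (an illustrative sketch; client/ctx assumed), checking whether
+// a user has read a given announcement:
+//
+//	seen, err := client.AnnouncementRead.Query().
+//		Where(
+//			announcementread.AnnouncementID(annID),
+//			announcementread.UserID(userID),
+//		).
+//		Exist(ctx)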
+func (_q *AnnouncementReadQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AnnouncementReadQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AnnouncementReadQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AnnouncementReadQuery) Clone() *AnnouncementReadQuery { + if _q == nil { + return nil + } + return &AnnouncementReadQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]announcementread.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.AnnouncementRead{}, _q.predicates...), + withAnnouncement: _q.withAnnouncement.Clone(), + withUser: _q.withUser.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAnnouncement tells the query-builder to eager-load the nodes that are connected to +// the "announcement" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AnnouncementReadQuery) WithAnnouncement(opts ...func(*AnnouncementQuery)) *AnnouncementReadQuery { + query := (&AnnouncementClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAnnouncement = query + return _q +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AnnouncementReadQuery) WithUser(opts ...func(*UserQuery)) *AnnouncementReadQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// AnnouncementID int64 `json:"announcement_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AnnouncementRead.Query(). +// GroupBy(announcementread.FieldAnnouncementID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AnnouncementReadQuery) GroupBy(field string, fields ...string) *AnnouncementReadGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AnnouncementReadGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = announcementread.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// AnnouncementID int64 `json:"announcement_id,omitempty"` +// } +// +// client.AnnouncementRead.Query(). +// Select(announcementread.FieldAnnouncementID). +// Scan(ctx, &v) +func (_q *AnnouncementReadQuery) Select(fields ...string) *AnnouncementReadSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+	sbuild := &AnnouncementReadSelect{AnnouncementReadQuery: _q}
+	sbuild.label = announcementread.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a AnnouncementReadSelect configured with the given aggregations.
+func (_q *AnnouncementReadQuery) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect {
+	return _q.Select().Aggregate(fns...)
+}
+
+func (_q *AnnouncementReadQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range _q.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, _q); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range _q.ctx.Fields {
+		if !announcementread.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if _q.path != nil {
+		prev, err := _q.path(ctx)
+		if err != nil {
+			return err
+		}
+		_q.sql = prev
+	}
+	return nil
+}
+
+func (_q *AnnouncementReadQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AnnouncementRead, error) {
+	var (
+		nodes       = []*AnnouncementRead{}
+		_spec       = _q.querySpec()
+		loadedTypes = [2]bool{
+			_q.withAnnouncement != nil,
+			_q.withUser != nil,
+		}
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*AnnouncementRead).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &AnnouncementRead{config: _q.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := _q.withAnnouncement; query != nil {
+		if err := _q.loadAnnouncement(ctx, query, nodes, nil,
+			func(n *AnnouncementRead, e *Announcement) { n.Edges.Announcement = e }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withUser; query != nil {
+		if err := _q.loadUser(ctx, query, nodes, nil,
+			func(n *AnnouncementRead, e *User) { n.Edges.User = e }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (_q *AnnouncementReadQuery) loadAnnouncement(ctx context.Context, query *AnnouncementQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *Announcement)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*AnnouncementRead)
+	for i := range nodes {
+		fk := nodes[i].AnnouncementID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(announcement.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "announcement_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+func (_q *AnnouncementReadQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *User)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*AnnouncementRead)
+	for i := range nodes {
+		fk := nodes[i].UserID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(user.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+
+func (_q *AnnouncementReadQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := _q.querySpec()
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	_spec.Node.Columns = _q.ctx.Fields
+	if len(_q.ctx.Fields) > 0 {
+		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
+}
+
+func (_q *AnnouncementReadQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
+	_spec.From = _q.sql
+	if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID)
+		for i := range fields {
+			if fields[i] != announcementread.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+		if _q.withAnnouncement != nil {
+			_spec.Node.AddColumnOnce(announcementread.FieldAnnouncementID)
+		}
+		if _q.withUser != nil {
+			_spec.Node.AddColumnOnce(announcementread.FieldUserID)
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *AnnouncementReadQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(announcementread.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = announcementread.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (_q *AnnouncementReadQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementReadQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+ }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *AnnouncementReadQuery) ForShare(opts ...sql.LockOption) *AnnouncementReadQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// AnnouncementReadGroupBy is the group-by builder for AnnouncementRead entities. +type AnnouncementReadGroupBy struct { + selector + build *AnnouncementReadQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AnnouncementReadGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementReadGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AnnouncementReadGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AnnouncementReadGroupBy) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AnnouncementReadSelect is the builder for selecting fields of AnnouncementRead entities. +type AnnouncementReadSelect struct { + *AnnouncementReadQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AnnouncementReadSelect) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AnnouncementReadSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadSelect](ctx, _s.AnnouncementReadQuery, _s, _s.inters, v) +} + +func (_s *AnnouncementReadSelect) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/announcementread_update.go b/backend/ent/announcementread_update.go new file mode 100644 index 00000000..55a4eef8 --- /dev/null +++ b/backend/ent/announcementread_update.go @@ -0,0 +1,456 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// AnnouncementReadUpdate is the builder for updating AnnouncementRead entities. +type AnnouncementReadUpdate struct { + config + hooks []Hook + mutation *AnnouncementReadMutation +} + +// Where appends a list predicates to the AnnouncementReadUpdate builder. +func (_u *AnnouncementReadUpdate) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetAnnouncementID sets the "announcement_id" field. +func (_u *AnnouncementReadUpdate) SetAnnouncementID(v int64) *AnnouncementReadUpdate { + _u.mutation.SetAnnouncementID(v) + return _u +} + +// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdate { + if v != nil { + _u.SetAnnouncementID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *AnnouncementReadUpdate) SetUserID(v int64) *AnnouncementReadUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableUserID(v *int64) *AnnouncementReadUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetReadAt sets the "read_at" field. +func (_u *AnnouncementReadUpdate) SetReadAt(v time.Time) *AnnouncementReadUpdate { + _u.mutation.SetReadAt(v) + return _u +} + +// SetNillableReadAt sets the "read_at" field if the given value is not nil. +func (_u *AnnouncementReadUpdate) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdate { + if v != nil { + _u.SetReadAt(*v) + } + return _u +} + +// SetAnnouncement sets the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdate) SetAnnouncement(v *Announcement) *AnnouncementReadUpdate { + return _u.SetAnnouncementID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *AnnouncementReadUpdate) SetUser(v *User) *AnnouncementReadUpdate { + return _u.SetUserID(v.ID) +} + +// Mutation returns the AnnouncementReadMutation object of the builder. +func (_u *AnnouncementReadUpdate) Mutation() *AnnouncementReadMutation { + return _u.mutation +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdate) ClearAnnouncement() *AnnouncementReadUpdate { + _u.mutation.ClearAnnouncement() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *AnnouncementReadUpdate) ClearUser() *AnnouncementReadUpdate { + _u.mutation.ClearUser() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
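+//
+// A minimal usage sketch (predicate and setter names are the ones generated in
+// this diff; uid and ctx are assumed):
+//
+//	n, err := client.AnnouncementRead.Update().
+//		Where(announcementread.UserID(uid)).
+//		SetReadAt(time.Now()).
+//		Save(ctx)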
+func (_u *AnnouncementReadUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AnnouncementReadUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AnnouncementReadUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementReadUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *AnnouncementReadUpdate) check() error { + if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`) + } + return nil +} + +func (_u *AnnouncementReadUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ReadAt(); ok { + _spec.SetField(announcementread.FieldReadAt, field.TypeTime, value) + } + if _u.mutation.AnnouncementCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := 
err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcementread.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AnnouncementReadUpdateOne is the builder for updating a single AnnouncementRead entity. +type AnnouncementReadUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AnnouncementReadMutation +} + +// SetAnnouncementID sets the "announcement_id" field. +func (_u *AnnouncementReadUpdateOne) SetAnnouncementID(v int64) *AnnouncementReadUpdateOne { + _u.mutation.SetAnnouncementID(v) + return _u +} + +// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetAnnouncementID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *AnnouncementReadUpdateOne) SetUserID(v int64) *AnnouncementReadUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableUserID(v *int64) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetReadAt sets the "read_at" field. +func (_u *AnnouncementReadUpdateOne) SetReadAt(v time.Time) *AnnouncementReadUpdateOne { + _u.mutation.SetReadAt(v) + return _u +} + +// SetNillableReadAt sets the "read_at" field if the given value is not nil. +func (_u *AnnouncementReadUpdateOne) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdateOne { + if v != nil { + _u.SetReadAt(*v) + } + return _u +} + +// SetAnnouncement sets the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdateOne) SetAnnouncement(v *Announcement) *AnnouncementReadUpdateOne { + return _u.SetAnnouncementID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *AnnouncementReadUpdateOne) SetUser(v *User) *AnnouncementReadUpdateOne { + return _u.SetUserID(v.ID) +} + +// Mutation returns the AnnouncementReadMutation object of the builder. +func (_u *AnnouncementReadUpdateOne) Mutation() *AnnouncementReadMutation { + return _u.mutation +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (_u *AnnouncementReadUpdateOne) ClearAnnouncement() *AnnouncementReadUpdateOne { + _u.mutation.ClearAnnouncement() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *AnnouncementReadUpdateOne) ClearUser() *AnnouncementReadUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// Where appends a list predicates to the AnnouncementReadUpdate builder. +func (_u *AnnouncementReadUpdateOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AnnouncementReadUpdateOne) Select(field string, fields ...string) *AnnouncementReadUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated AnnouncementRead entity. +func (_u *AnnouncementReadUpdateOne) Save(ctx context.Context) (*AnnouncementRead, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
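+// Prefer Save with explicit error handling on request paths; SaveX suits
+// one-off initialization code where panicking is acceptable.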
+func (_u *AnnouncementReadUpdateOne) SaveX(ctx context.Context) *AnnouncementRead { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AnnouncementReadUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AnnouncementReadUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *AnnouncementReadUpdateOne) check() error { + if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`) + } + return nil +} + +func (_u *AnnouncementReadUpdateOne) sqlSave(ctx context.Context) (_node *AnnouncementRead, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AnnouncementRead.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID) + for _, f := range fields { + if !announcementread.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != announcementread.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.ReadAt(); ok { + _spec.SetField(announcementread.FieldReadAt, field.TypeTime, value) + } + if _u.mutation.AnnouncementCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.AnnouncementTable, + Columns: []string{announcementread.AnnouncementColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.M2O, + Inverse: true, + Table: announcementread.UserTable, + Columns: []string{announcementread.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AnnouncementRead{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{announcementread.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go index 35cf644f..a17721da 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -17,6 +17,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -24,6 +26,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -45,6 +48,10 @@ type Client struct { Account *AccountClient // AccountGroup is the client for interacting with the AccountGroup builders. AccountGroup *AccountGroupClient + // Announcement is the client for interacting with the Announcement builders. + Announcement *AnnouncementClient + // AnnouncementRead is the client for interacting with the AnnouncementRead builders. + AnnouncementRead *AnnouncementReadClient // Group is the client for interacting with the Group builders. Group *GroupClient // PromoCode is the client for interacting with the PromoCode builders. @@ -57,6 +64,8 @@ type Client struct { RedeemCode *RedeemCodeClient // Setting is the client for interacting with the Setting builders. Setting *SettingClient + // UsageCleanupTask is the client for interacting with the UsageCleanupTask builders. + UsageCleanupTask *UsageCleanupTaskClient // UsageLog is the client for interacting with the UsageLog builders. UsageLog *UsageLogClient // User is the client for interacting with the User builders. 
@@ -83,12 +92,15 @@ func (c *Client) init() { c.APIKey = NewAPIKeyClient(c.config) c.Account = NewAccountClient(c.config) c.AccountGroup = NewAccountGroupClient(c.config) + c.Announcement = NewAnnouncementClient(c.config) + c.AnnouncementRead = NewAnnouncementReadClient(c.config) c.Group = NewGroupClient(c.config) c.PromoCode = NewPromoCodeClient(c.config) c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) c.Proxy = NewProxyClient(c.config) c.RedeemCode = NewRedeemCodeClient(c.config) c.Setting = NewSettingClient(c.config) + c.UsageCleanupTask = NewUsageCleanupTaskClient(c.config) c.UsageLog = NewUsageLogClient(c.config) c.User = NewUserClient(c.config) c.UserAllowedGroup = NewUserAllowedGroupClient(c.config) @@ -190,12 +202,15 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { APIKey: NewAPIKeyClient(cfg), Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), Group: NewGroupClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), UsageLog: NewUsageLogClient(cfg), User: NewUserClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg), @@ -224,12 +239,15 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) APIKey: NewAPIKeyClient(cfg), Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), Group: NewGroupClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), UsageLog: NewUsageLogClient(cfg), User: NewUserClient(cfg), UserAllowedGroup: NewUserAllowedGroupClient(cfg), @@ -265,8 +283,9 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, + c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) @@ -277,8 +296,9 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, + c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) 
@@ -294,6 +314,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Account.mutate(ctx, m) case *AccountGroupMutation: return c.AccountGroup.mutate(ctx, m) + case *AnnouncementMutation: + return c.Announcement.mutate(ctx, m) + case *AnnouncementReadMutation: + return c.AnnouncementRead.mutate(ctx, m) case *GroupMutation: return c.Group.mutate(ctx, m) case *PromoCodeMutation: @@ -306,6 +330,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.RedeemCode.mutate(ctx, m) case *SettingMutation: return c.Setting.mutate(ctx, m) + case *UsageCleanupTaskMutation: + return c.UsageCleanupTask.mutate(ctx, m) case *UsageLogMutation: return c.UsageLog.mutate(ctx, m) case *UserMutation: @@ -821,6 +847,320 @@ func (c *AccountGroupClient) mutate(ctx context.Context, m *AccountGroupMutation } } +// AnnouncementClient is a client for the Announcement schema. +type AnnouncementClient struct { + config +} + +// NewAnnouncementClient returns a client for the Announcement from the given config. +func NewAnnouncementClient(c config) *AnnouncementClient { + return &AnnouncementClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `announcement.Hooks(f(g(h())))`. +func (c *AnnouncementClient) Use(hooks ...Hook) { + c.hooks.Announcement = append(c.hooks.Announcement, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `announcement.Intercept(f(g(h())))`. +func (c *AnnouncementClient) Intercept(interceptors ...Interceptor) { + c.inters.Announcement = append(c.inters.Announcement, interceptors...) +} + +// Create returns a builder for creating a Announcement entity. +func (c *AnnouncementClient) Create() *AnnouncementCreate { + mutation := newAnnouncementMutation(c.config, OpCreate) + return &AnnouncementCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Announcement entities. +func (c *AnnouncementClient) CreateBulk(builders ...*AnnouncementCreate) *AnnouncementCreateBulk { + return &AnnouncementCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AnnouncementClient) MapCreateBulk(slice any, setFunc func(*AnnouncementCreate, int)) *AnnouncementCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AnnouncementCreateBulk{err: fmt.Errorf("calling to AnnouncementClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AnnouncementCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AnnouncementCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Announcement. +func (c *AnnouncementClient) Update() *AnnouncementUpdate { + mutation := newAnnouncementMutation(c.config, OpUpdate) + return &AnnouncementUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
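+// Usage sketch (hedged; SetStatus follows ent's generated setter naming for
+// the status field declared in this change's schema):
+//
+//	ann, err := client.Announcement.UpdateOne(a).
+//		SetStatus("published").
+//		Save(ctx)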
+func (c *AnnouncementClient) UpdateOne(_m *Announcement) *AnnouncementUpdateOne { + mutation := newAnnouncementMutation(c.config, OpUpdateOne, withAnnouncement(_m)) + return &AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AnnouncementClient) UpdateOneID(id int64) *AnnouncementUpdateOne { + mutation := newAnnouncementMutation(c.config, OpUpdateOne, withAnnouncementID(id)) + return &AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Announcement. +func (c *AnnouncementClient) Delete() *AnnouncementDelete { + mutation := newAnnouncementMutation(c.config, OpDelete) + return &AnnouncementDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AnnouncementClient) DeleteOne(_m *Announcement) *AnnouncementDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AnnouncementClient) DeleteOneID(id int64) *AnnouncementDeleteOne { + builder := c.Delete().Where(announcement.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AnnouncementDeleteOne{builder} +} + +// Query returns a query builder for Announcement. +func (c *AnnouncementClient) Query() *AnnouncementQuery { + return &AnnouncementQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAnnouncement}, + inters: c.Interceptors(), + } +} + +// Get returns a Announcement entity by its id. +func (c *AnnouncementClient) Get(ctx context.Context, id int64) (*Announcement, error) { + return c.Query().Where(announcement.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AnnouncementClient) GetX(ctx context.Context, id int64) *Announcement { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryReads queries the reads edge of a Announcement. +func (c *AnnouncementClient) QueryReads(_m *Announcement) *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcement.Table, announcement.FieldID, id), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, announcement.ReadsTable, announcement.ReadsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AnnouncementClient) Hooks() []Hook { + return c.hooks.Announcement +} + +// Interceptors returns the client interceptors. 
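+// A hedged registration sketch using the TraverseAnnouncement adapter added in
+// ent/intercept within this same change (the status predicate is assumed from
+// ent's generated field helpers):
+//
+//	client.Announcement.Intercept(intercept.TraverseAnnouncement(
+//		func(ctx context.Context, q *ent.AnnouncementQuery) error {
+//			q.Where(announcement.Status("published"))
+//			return nil
+//		},
+//	))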
+func (c *AnnouncementClient) Interceptors() []Interceptor { + return c.inters.Announcement +} + +func (c *AnnouncementClient) mutate(ctx context.Context, m *AnnouncementMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AnnouncementCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AnnouncementUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AnnouncementUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AnnouncementDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Announcement mutation op: %q", m.Op()) + } +} + +// AnnouncementReadClient is a client for the AnnouncementRead schema. +type AnnouncementReadClient struct { + config +} + +// NewAnnouncementReadClient returns a client for the AnnouncementRead from the given config. +func NewAnnouncementReadClient(c config) *AnnouncementReadClient { + return &AnnouncementReadClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `announcementread.Hooks(f(g(h())))`. +func (c *AnnouncementReadClient) Use(hooks ...Hook) { + c.hooks.AnnouncementRead = append(c.hooks.AnnouncementRead, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `announcementread.Intercept(f(g(h())))`. +func (c *AnnouncementReadClient) Intercept(interceptors ...Interceptor) { + c.inters.AnnouncementRead = append(c.inters.AnnouncementRead, interceptors...) +} + +// Create returns a builder for creating a AnnouncementRead entity. +func (c *AnnouncementReadClient) Create() *AnnouncementReadCreate { + mutation := newAnnouncementReadMutation(c.config, OpCreate) + return &AnnouncementReadCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AnnouncementRead entities. +func (c *AnnouncementReadClient) CreateBulk(builders ...*AnnouncementReadCreate) *AnnouncementReadCreateBulk { + return &AnnouncementReadCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AnnouncementReadClient) MapCreateBulk(slice any, setFunc func(*AnnouncementReadCreate, int)) *AnnouncementReadCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AnnouncementReadCreateBulk{err: fmt.Errorf("calling to AnnouncementReadClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AnnouncementReadCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AnnouncementReadCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AnnouncementRead. +func (c *AnnouncementReadClient) Update() *AnnouncementReadUpdate { + mutation := newAnnouncementReadMutation(c.config, OpUpdate) + return &AnnouncementReadUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
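+// e.g. (sketch, names from this diff):
+//
+//	r, err := client.AnnouncementRead.UpdateOne(r).SetReadAt(time.Now()).Save(ctx)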
+func (c *AnnouncementReadClient) UpdateOne(_m *AnnouncementRead) *AnnouncementReadUpdateOne { + mutation := newAnnouncementReadMutation(c.config, OpUpdateOne, withAnnouncementRead(_m)) + return &AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AnnouncementReadClient) UpdateOneID(id int64) *AnnouncementReadUpdateOne { + mutation := newAnnouncementReadMutation(c.config, OpUpdateOne, withAnnouncementReadID(id)) + return &AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AnnouncementRead. +func (c *AnnouncementReadClient) Delete() *AnnouncementReadDelete { + mutation := newAnnouncementReadMutation(c.config, OpDelete) + return &AnnouncementReadDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AnnouncementReadClient) DeleteOne(_m *AnnouncementRead) *AnnouncementReadDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AnnouncementReadClient) DeleteOneID(id int64) *AnnouncementReadDeleteOne { + builder := c.Delete().Where(announcementread.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AnnouncementReadDeleteOne{builder} +} + +// Query returns a query builder for AnnouncementRead. +func (c *AnnouncementReadClient) Query() *AnnouncementReadQuery { + return &AnnouncementReadQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAnnouncementRead}, + inters: c.Interceptors(), + } +} + +// Get returns a AnnouncementRead entity by its id. +func (c *AnnouncementReadClient) Get(ctx context.Context, id int64) (*AnnouncementRead, error) { + return c.Query().Where(announcementread.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AnnouncementReadClient) GetX(ctx context.Context, id int64) *AnnouncementRead { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAnnouncement queries the announcement edge of a AnnouncementRead. +func (c *AnnouncementReadClient) QueryAnnouncement(_m *AnnouncementRead) *AnnouncementQuery { + query := (&AnnouncementClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, id), + sqlgraph.To(announcement.Table, announcement.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.AnnouncementTable, announcementread.AnnouncementColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUser queries the user edge of a AnnouncementRead. +func (c *AnnouncementReadClient) QueryUser(_m *AnnouncementRead) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(announcementread.Table, announcementread.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, announcementread.UserTable, announcementread.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
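+// Registration sketch (hedged; hook.AnnouncementReadFunc is added in ent/hook
+// within this same change, the hook body is an assumption):
+//
+//	client.AnnouncementRead.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.AnnouncementReadFunc(func(ctx context.Context, m *ent.AnnouncementReadMutation) (ent.Value, error) {
+//			// inspect or adjust the mutation before it runs
+//			return next.Mutate(ctx, m)
+//		})
+//	})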
+func (c *AnnouncementReadClient) Hooks() []Hook { + return c.hooks.AnnouncementRead +} + +// Interceptors returns the client interceptors. +func (c *AnnouncementReadClient) Interceptors() []Interceptor { + return c.inters.AnnouncementRead +} + +func (c *AnnouncementReadClient) mutate(ctx context.Context, m *AnnouncementReadMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AnnouncementReadCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AnnouncementReadUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AnnouncementReadUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AnnouncementReadDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AnnouncementRead mutation op: %q", m.Op()) + } +} + // GroupClient is a client for the Group schema. type GroupClient struct { config @@ -1847,6 +2187,139 @@ func (c *SettingClient) mutate(ctx context.Context, m *SettingMutation) (Value, } } +// UsageCleanupTaskClient is a client for the UsageCleanupTask schema. +type UsageCleanupTaskClient struct { + config +} + +// NewUsageCleanupTaskClient returns a client for the UsageCleanupTask from the given config. +func NewUsageCleanupTaskClient(c config) *UsageCleanupTaskClient { + return &UsageCleanupTaskClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `usagecleanuptask.Hooks(f(g(h())))`. +func (c *UsageCleanupTaskClient) Use(hooks ...Hook) { + c.hooks.UsageCleanupTask = append(c.hooks.UsageCleanupTask, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `usagecleanuptask.Intercept(f(g(h())))`. +func (c *UsageCleanupTaskClient) Intercept(interceptors ...Interceptor) { + c.inters.UsageCleanupTask = append(c.inters.UsageCleanupTask, interceptors...) +} + +// Create returns a builder for creating a UsageCleanupTask entity. +func (c *UsageCleanupTaskClient) Create() *UsageCleanupTaskCreate { + mutation := newUsageCleanupTaskMutation(c.config, OpCreate) + return &UsageCleanupTaskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UsageCleanupTask entities. +func (c *UsageCleanupTaskClient) CreateBulk(builders ...*UsageCleanupTaskCreate) *UsageCleanupTaskCreateBulk { + return &UsageCleanupTaskCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UsageCleanupTaskClient) MapCreateBulk(slice any, setFunc func(*UsageCleanupTaskCreate, int)) *UsageCleanupTaskCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UsageCleanupTaskCreateBulk{err: fmt.Errorf("calling to UsageCleanupTaskClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UsageCleanupTaskCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UsageCleanupTaskCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UsageCleanupTask. 
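+// Usage sketch (hedged; StatusEQ and SetStatus follow ent's generated naming
+// for the status field declared in this change's migration):
+//
+//	n, err := client.UsageCleanupTask.Update().
+//		Where(usagecleanuptask.StatusEQ("pending")).
+//		SetStatus("canceled").
+//		Save(ctx)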
+func (c *UsageCleanupTaskClient) Update() *UsageCleanupTaskUpdate { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdate) + return &UsageCleanupTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UsageCleanupTaskClient) UpdateOne(_m *UsageCleanupTask) *UsageCleanupTaskUpdateOne { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdateOne, withUsageCleanupTask(_m)) + return &UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UsageCleanupTaskClient) UpdateOneID(id int64) *UsageCleanupTaskUpdateOne { + mutation := newUsageCleanupTaskMutation(c.config, OpUpdateOne, withUsageCleanupTaskID(id)) + return &UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UsageCleanupTask. +func (c *UsageCleanupTaskClient) Delete() *UsageCleanupTaskDelete { + mutation := newUsageCleanupTaskMutation(c.config, OpDelete) + return &UsageCleanupTaskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UsageCleanupTaskClient) DeleteOne(_m *UsageCleanupTask) *UsageCleanupTaskDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UsageCleanupTaskClient) DeleteOneID(id int64) *UsageCleanupTaskDeleteOne { + builder := c.Delete().Where(usagecleanuptask.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UsageCleanupTaskDeleteOne{builder} +} + +// Query returns a query builder for UsageCleanupTask. +func (c *UsageCleanupTaskClient) Query() *UsageCleanupTaskQuery { + return &UsageCleanupTaskQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUsageCleanupTask}, + inters: c.Interceptors(), + } +} + +// Get returns a UsageCleanupTask entity by its id. +func (c *UsageCleanupTaskClient) Get(ctx context.Context, id int64) (*UsageCleanupTask, error) { + return c.Query().Where(usagecleanuptask.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UsageCleanupTaskClient) GetX(ctx context.Context, id int64) *UsageCleanupTask { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *UsageCleanupTaskClient) Hooks() []Hook { + return c.hooks.UsageCleanupTask +} + +// Interceptors returns the client interceptors. +func (c *UsageCleanupTaskClient) Interceptors() []Interceptor { + return c.inters.UsageCleanupTask +} + +func (c *UsageCleanupTaskClient) mutate(ctx context.Context, m *UsageCleanupTaskMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UsageCleanupTaskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UsageCleanupTaskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UsageCleanupTaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UsageCleanupTaskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UsageCleanupTask mutation op: %q", m.Op()) + } +} + // UsageLogClient is a client for the UsageLog schema. 
type UsageLogClient struct { config @@ -2232,6 +2705,22 @@ func (c *UserClient) QueryAssignedSubscriptions(_m *User) *UserSubscriptionQuery return query } +// QueryAnnouncementReads queries the announcement_reads edge of a User. +func (c *UserClient) QueryAnnouncementReads(_m *User) *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AnnouncementReadsTable, user.AnnouncementReadsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryAllowedGroups queries the allowed_groups edge of a User. func (c *UserClient) QueryAllowedGroups(_m *User) *GroupQuery { query := (&GroupClient{config: c.config}).Query() @@ -2973,14 +3462,16 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription // hooks and interceptors per client, for fast access. type ( hooks struct { - APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Hook + APIKey, Account, AccountGroup, Announcement, AnnouncementRead, Group, PromoCode, + PromoCodeUsage, Proxy, RedeemCode, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { - APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Interceptor + APIKey, Account, AccountGroup, Announcement, AnnouncementRead, Group, PromoCode, + PromoCodeUsage, Proxy, RedeemCode, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 410375a7..05e30ba7 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -14,6 +14,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -21,6 +23,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -90,12 +93,15 @@ func checkColumn(t, c string) error { apikey.Table: apikey.ValidColumn, account.Table: account.ValidColumn, accountgroup.Table: accountgroup.ValidColumn, + announcement.Table: announcement.ValidColumn, + announcementread.Table: announcementread.ValidColumn, group.Table: group.ValidColumn, promocode.Table: promocode.ValidColumn, promocodeusage.Table: promocodeusage.ValidColumn, proxy.Table: proxy.ValidColumn, redeemcode.Table: redeemcode.ValidColumn, setting.Table: setting.ValidColumn, + usagecleanuptask.Table: usagecleanuptask.ValidColumn, 
usagelog.Table: usagelog.ValidColumn, user.Table: user.ValidColumn, userallowedgroup.Table: userallowedgroup.ValidColumn, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 532b0d2c..1e653c77 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -45,6 +45,30 @@ func (f AccountGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountGroupMutation", m) } +// The AnnouncementFunc type is an adapter to allow the use of ordinary +// function as Announcement mutator. +type AnnouncementFunc func(context.Context, *ent.AnnouncementMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AnnouncementFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AnnouncementMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnnouncementMutation", m) +} + +// The AnnouncementReadFunc type is an adapter to allow the use of ordinary +// function as AnnouncementRead mutator. +type AnnouncementReadFunc func(context.Context, *ent.AnnouncementReadMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AnnouncementReadFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AnnouncementReadMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnnouncementReadMutation", m) +} + // The GroupFunc type is an adapter to allow the use of ordinary // function as Group mutator. type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error) @@ -117,6 +141,18 @@ func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, err return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m) } +// The UsageCleanupTaskFunc type is an adapter to allow the use of ordinary +// function as UsageCleanupTask mutator. +type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UsageCleanupTaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UsageCleanupTaskMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UsageCleanupTaskMutation", m) +} + // The UsageLogFunc type is an adapter to allow the use of ordinary // function as UsageLog mutator. 
type UsageLogFunc func(context.Context, *ent.UsageLogMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 765d39b4..a37be48f 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -10,6 +10,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -18,6 +20,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -163,6 +166,60 @@ func (f TraverseAccountGroup) Traverse(ctx context.Context, q ent.Query) error { return fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) } +// The AnnouncementFunc type is an adapter to allow the use of ordinary function as a Querier. +type AnnouncementFunc func(context.Context, *ent.AnnouncementQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AnnouncementFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AnnouncementQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementQuery", q) +} + +// The TraverseAnnouncement type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAnnouncement func(context.Context, *ent.AnnouncementQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAnnouncement) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAnnouncement) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AnnouncementQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementQuery", q) +} + +// The AnnouncementReadFunc type is an adapter to allow the use of ordinary function as a Querier. +type AnnouncementReadFunc func(context.Context, *ent.AnnouncementReadQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AnnouncementReadFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AnnouncementReadQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementReadQuery", q) +} + +// The TraverseAnnouncementRead type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAnnouncementRead func(context.Context, *ent.AnnouncementReadQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAnnouncementRead) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAnnouncementRead) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AnnouncementReadQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementReadQuery", q) +} + // The GroupFunc type is an adapter to allow the use of ordinary function as a Querier. 
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error) @@ -325,6 +382,33 @@ func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error { return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) } +// The UsageCleanupTaskFunc type is an adapter to allow the use of ordinary function as a Querier. +type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UsageCleanupTaskFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UsageCleanupTaskQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UsageCleanupTaskQuery", q) +} + +// The TraverseUsageCleanupTask type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUsageCleanupTask func(context.Context, *ent.UsageCleanupTaskQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUsageCleanupTask) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUsageCleanupTask) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UsageCleanupTaskQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UsageCleanupTaskQuery", q) +} + // The UsageLogFunc type is an adapter to allow the use of ordinary function as a Querier. type UsageLogFunc func(context.Context, *ent.UsageLogQuery) (ent.Value, error) @@ -496,6 +580,10 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.AccountQuery, predicate.Account, account.OrderOption]{typ: ent.TypeAccount, tq: q}, nil case *ent.AccountGroupQuery: return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil + case *ent.AnnouncementQuery: + return &query[*ent.AnnouncementQuery, predicate.Announcement, announcement.OrderOption]{typ: ent.TypeAnnouncement, tq: q}, nil + case *ent.AnnouncementReadQuery: + return &query[*ent.AnnouncementReadQuery, predicate.AnnouncementRead, announcementread.OrderOption]{typ: ent.TypeAnnouncementRead, tq: q}, nil case *ent.GroupQuery: return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil case *ent.PromoCodeQuery: @@ -508,6 +596,8 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil case *ent.SettingQuery: return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil + case *ent.UsageCleanupTaskQuery: + return &query[*ent.UsageCleanupTaskQuery, predicate.UsageCleanupTask, usagecleanuptask.OrderOption]{typ: ent.TypeUsageCleanupTask, tq: q}, nil case *ent.UsageLogQuery: return &query[*ent.UsageLogQuery, predicate.UsageLog, usagelog.OrderOption]{typ: ent.TypeUsageLog, tq: q}, nil case *ent.UserQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 3b83061e..434916f8 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -204,6 +204,98 @@ var ( }, }, } + // AnnouncementsColumns holds the columns for the "announcements" table. 
+ AnnouncementsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "title", Type: field.TypeString, Size: 200}, + {Name: "content", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "draft"}, + {Name: "targeting", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "starts_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "ends_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "created_by", Type: field.TypeInt64, Nullable: true}, + {Name: "updated_by", Type: field.TypeInt64, Nullable: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // AnnouncementsTable holds the schema information for the "announcements" table. + AnnouncementsTable = &schema.Table{ + Name: "announcements", + Columns: AnnouncementsColumns, + PrimaryKey: []*schema.Column{AnnouncementsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "announcement_status", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[3]}, + }, + { + Name: "announcement_created_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[9]}, + }, + { + Name: "announcement_starts_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[5]}, + }, + { + Name: "announcement_ends_at", + Unique: false, + Columns: []*schema.Column{AnnouncementsColumns[6]}, + }, + }, + } + // AnnouncementReadsColumns holds the columns for the "announcement_reads" table. + AnnouncementReadsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "read_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "announcement_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + } + // AnnouncementReadsTable holds the schema information for the "announcement_reads" table. 
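+	// The unique (announcement_id, user_id) index declared below makes
+	// read-marking idempotent per user and announcement.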
+ AnnouncementReadsTable = &schema.Table{ + Name: "announcement_reads", + Columns: AnnouncementReadsColumns, + PrimaryKey: []*schema.Column{AnnouncementReadsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "announcement_reads_announcements_reads", + Columns: []*schema.Column{AnnouncementReadsColumns[3]}, + RefColumns: []*schema.Column{AnnouncementsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "announcement_reads_users_announcement_reads", + Columns: []*schema.Column{AnnouncementReadsColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "announcementread_announcement_id", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[3]}, + }, + { + Name: "announcementread_user_id", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[4]}, + }, + { + Name: "announcementread_read_at", + Unique: false, + Columns: []*schema.Column{AnnouncementReadsColumns[1]}, + }, + { + Name: "announcementread_announcement_id_user_id", + Unique: true, + Columns: []*schema.Column{AnnouncementReadsColumns[3], AnnouncementReadsColumns[4]}, + }, + }, + } // GroupsColumns holds the columns for the "groups" table. GroupsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -436,6 +528,44 @@ var ( Columns: SettingsColumns, PrimaryKey: []*schema.Column{SettingsColumns[0]}, } + // UsageCleanupTasksColumns holds the columns for the "usage_cleanup_tasks" table. + UsageCleanupTasksColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "status", Type: field.TypeString, Size: 20}, + {Name: "filters", Type: field.TypeJSON}, + {Name: "created_by", Type: field.TypeInt64}, + {Name: "deleted_rows", Type: field.TypeInt64, Default: 0}, + {Name: "error_message", Type: field.TypeString, Nullable: true}, + {Name: "canceled_by", Type: field.TypeInt64, Nullable: true}, + {Name: "canceled_at", Type: field.TypeTime, Nullable: true}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "finished_at", Type: field.TypeTime, Nullable: true}, + } + // UsageCleanupTasksTable holds the schema information for the "usage_cleanup_tasks" table. + UsageCleanupTasksTable = &schema.Table{ + Name: "usage_cleanup_tasks", + Columns: UsageCleanupTasksColumns, + PrimaryKey: []*schema.Column{UsageCleanupTasksColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "usagecleanuptask_status_created_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[3], UsageCleanupTasksColumns[1]}, + }, + { + Name: "usagecleanuptask_created_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[1]}, + }, + { + Name: "usagecleanuptask_canceled_at", + Unique: false, + Columns: []*schema.Column{UsageCleanupTasksColumns[9]}, + }, + }, + } // UsageLogsColumns holds the columns for the "usage_logs" table. 
UsageLogsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -574,6 +704,9 @@ var ( {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, {Name: "username", Type: field.TypeString, Size: 100, Default: ""}, {Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + {Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "totp_enabled", Type: field.TypeBool, Default: false}, + {Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true}, } // UsersTable holds the schema information for the "users" table. UsersTable = &schema.Table{ @@ -801,12 +934,15 @@ var ( APIKeysTable, AccountsTable, AccountGroupsTable, + AnnouncementsTable, + AnnouncementReadsTable, GroupsTable, PromoCodesTable, PromoCodeUsagesTable, ProxiesTable, RedeemCodesTable, SettingsTable, + UsageCleanupTasksTable, UsageLogsTable, UsersTable, UserAllowedGroupsTable, @@ -831,6 +967,14 @@ func init() { AccountGroupsTable.Annotation = &entsql.Annotation{ Table: "account_groups", } + AnnouncementsTable.Annotation = &entsql.Annotation{ + Table: "announcements", + } + AnnouncementReadsTable.ForeignKeys[0].RefTable = AnnouncementsTable + AnnouncementReadsTable.ForeignKeys[1].RefTable = UsersTable + AnnouncementReadsTable.Annotation = &entsql.Annotation{ + Table: "announcement_reads", + } GroupsTable.Annotation = &entsql.Annotation{ Table: "groups", } @@ -853,6 +997,9 @@ func init() { SettingsTable.Annotation = &entsql.Annotation{ Table: "settings", } + UsageCleanupTasksTable.Annotation = &entsql.Annotation{ + Table: "usage_cleanup_tasks", + } UsageLogsTable.ForeignKeys[0].RefTable = APIKeysTable UsageLogsTable.ForeignKeys[1].RefTable = AccountsTable UsageLogsTable.ForeignKeys[2].RefTable = GroupsTable diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 98195985..cb654b7b 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -4,6 +4,7 @@ package ent import ( "context" + "encoding/json" "errors" "fmt" "sync" @@ -13,6 +14,8 @@ import ( "entgo.io/ent/dialect/sql" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -21,12 +24,14 @@ import ( "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" + "github.com/Wei-Shaw/sub2api/internal/domain" ) const ( @@ -41,12 +46,15 @@ const ( TypeAPIKey = "APIKey" TypeAccount = "Account" TypeAccountGroup = "AccountGroup" + TypeAnnouncement = "Announcement" + TypeAnnouncementRead = "AnnouncementRead" TypeGroup = "Group" TypePromoCode = "PromoCode" TypePromoCodeUsage = "PromoCodeUsage" TypeProxy = "Proxy" TypeRedeemCode = "RedeemCode" TypeSetting = "Setting" + TypeUsageCleanupTask = "UsageCleanupTask" TypeUsageLog = "UsageLog" TypeUser = "User" TypeUserAllowedGroup = "UserAllowedGroup" @@ -3830,6 +3838,1671 @@ func (m 
*AccountGroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AccountGroup edge %s", name) } +// AnnouncementMutation represents an operation that mutates the Announcement nodes in the graph. +type AnnouncementMutation struct { + config + op Op + typ string + id *int64 + title *string + content *string + status *string + targeting *domain.AnnouncementTargeting + starts_at *time.Time + ends_at *time.Time + created_by *int64 + addcreated_by *int64 + updated_by *int64 + addupdated_by *int64 + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + reads map[int64]struct{} + removedreads map[int64]struct{} + clearedreads bool + done bool + oldValue func(context.Context) (*Announcement, error) + predicates []predicate.Announcement +} + +var _ ent.Mutation = (*AnnouncementMutation)(nil) + +// announcementOption allows management of the mutation configuration using functional options. +type announcementOption func(*AnnouncementMutation) + +// newAnnouncementMutation creates new mutation for the Announcement entity. +func newAnnouncementMutation(c config, op Op, opts ...announcementOption) *AnnouncementMutation { + m := &AnnouncementMutation{ + config: c, + op: op, + typ: TypeAnnouncement, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAnnouncementID sets the ID field of the mutation. +func withAnnouncementID(id int64) announcementOption { + return func(m *AnnouncementMutation) { + var ( + err error + once sync.Once + value *Announcement + ) + m.oldValue = func(ctx context.Context) (*Announcement, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Announcement.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAnnouncement sets the old Announcement of the mutation. +func withAnnouncement(node *Announcement) announcementOption { + return func(m *AnnouncementMutation) { + m.oldValue = func(context.Context) (*Announcement, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AnnouncementMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AnnouncementMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AnnouncementMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
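withAnnouncementID defers the row fetch and caches it behind sync.Once, so the Old* accessors used by hooks pay for at most one extra query no matter how often they are called. The same pattern in isolation, as a generic restatement rather than anything generated here (assumes Go 1.18+ with context and sync imported):

// memoize wraps a loader so it executes at most once; subsequent calls
// return the cached value and error, mirroring the oldValue closure above.
func memoize[T any](load func(context.Context) (T, error)) func(context.Context) (T, error) {
    var (
        once  sync.Once
        value T
        err   error
    )
    return func(ctx context.Context) (T, error) {
        once.Do(func() { value, err = load(ctx) })
        return value, err
    }
}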
+func (m *AnnouncementMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Announcement.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetTitle sets the "title" field. +func (m *AnnouncementMutation) SetTitle(s string) { + m.title = &s +} + +// Title returns the value of the "title" field in the mutation. +func (m *AnnouncementMutation) Title() (r string, exists bool) { + v := m.title + if v == nil { + return + } + return *v, true +} + +// OldTitle returns the old "title" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldTitle(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTitle is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTitle requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTitle: %w", err) + } + return oldValue.Title, nil +} + +// ResetTitle resets all changes to the "title" field. +func (m *AnnouncementMutation) ResetTitle() { + m.title = nil +} + +// SetContent sets the "content" field. +func (m *AnnouncementMutation) SetContent(s string) { + m.content = &s +} + +// Content returns the value of the "content" field in the mutation. +func (m *AnnouncementMutation) Content() (r string, exists bool) { + v := m.content + if v == nil { + return + } + return *v, true +} + +// OldContent returns the old "content" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldContent(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldContent is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldContent requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldContent: %w", err) + } + return oldValue.Content, nil +} + +// ResetContent resets all changes to the "content" field. +func (m *AnnouncementMutation) ResetContent() { + m.content = nil +} + +// SetStatus sets the "status" field. +func (m *AnnouncementMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *AnnouncementMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *AnnouncementMutation) ResetStatus() { + m.status = nil +} + +// SetTargeting sets the "targeting" field. +func (m *AnnouncementMutation) SetTargeting(dt domain.AnnouncementTargeting) { + m.targeting = &dt +} + +// Targeting returns the value of the "targeting" field in the mutation. +func (m *AnnouncementMutation) Targeting() (r domain.AnnouncementTargeting, exists bool) { + v := m.targeting + if v == nil { + return + } + return *v, true +} + +// OldTargeting returns the old "targeting" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldTargeting(ctx context.Context) (v domain.AnnouncementTargeting, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTargeting is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTargeting requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTargeting: %w", err) + } + return oldValue.Targeting, nil +} + +// ClearTargeting clears the value of the "targeting" field. +func (m *AnnouncementMutation) ClearTargeting() { + m.targeting = nil + m.clearedFields[announcement.FieldTargeting] = struct{}{} +} + +// TargetingCleared returns if the "targeting" field was cleared in this mutation. +func (m *AnnouncementMutation) TargetingCleared() bool { + _, ok := m.clearedFields[announcement.FieldTargeting] + return ok +} + +// ResetTargeting resets all changes to the "targeting" field. +func (m *AnnouncementMutation) ResetTargeting() { + m.targeting = nil + delete(m.clearedFields, announcement.FieldTargeting) +} + +// SetStartsAt sets the "starts_at" field. +func (m *AnnouncementMutation) SetStartsAt(t time.Time) { + m.starts_at = &t +} + +// StartsAt returns the value of the "starts_at" field in the mutation. +func (m *AnnouncementMutation) StartsAt() (r time.Time, exists bool) { + v := m.starts_at + if v == nil { + return + } + return *v, true +} + +// OldStartsAt returns the old "starts_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
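The Old* accessors are what make audit-style hooks cheap: on UpdateOne they lazily load the pre-mutation row so the hook can diff it against the staged value. A hedged sketch of such a hook; registration via client.Announcement.Use, the generated hook package, and the log sink are assumptions:

client.Announcement.Use(func(next ent.Mutator) ent.Mutator {
    return hook.AnnouncementFunc(func(ctx context.Context, m *ent.AnnouncementMutation) (ent.Value, error) {
        // Only UpdateOne can resolve old values; creates have no previous row.
        if newStatus, ok := m.Status(); ok && m.Op().Is(ent.OpUpdateOne) {
            oldStatus, err := m.OldStatus(ctx)
            if err != nil {
                return nil, err
            }
            if oldStatus != newStatus {
                id, _ := m.ID()
                log.Printf("announcement %d: status %q -> %q", id, oldStatus, newStatus)
            }
        }
        return next.Mutate(ctx, m)
    })
})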
+func (m *AnnouncementMutation) OldStartsAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartsAt: %w", err) + } + return oldValue.StartsAt, nil +} + +// ClearStartsAt clears the value of the "starts_at" field. +func (m *AnnouncementMutation) ClearStartsAt() { + m.starts_at = nil + m.clearedFields[announcement.FieldStartsAt] = struct{}{} +} + +// StartsAtCleared returns if the "starts_at" field was cleared in this mutation. +func (m *AnnouncementMutation) StartsAtCleared() bool { + _, ok := m.clearedFields[announcement.FieldStartsAt] + return ok +} + +// ResetStartsAt resets all changes to the "starts_at" field. +func (m *AnnouncementMutation) ResetStartsAt() { + m.starts_at = nil + delete(m.clearedFields, announcement.FieldStartsAt) +} + +// SetEndsAt sets the "ends_at" field. +func (m *AnnouncementMutation) SetEndsAt(t time.Time) { + m.ends_at = &t +} + +// EndsAt returns the value of the "ends_at" field in the mutation. +func (m *AnnouncementMutation) EndsAt() (r time.Time, exists bool) { + v := m.ends_at + if v == nil { + return + } + return *v, true +} + +// OldEndsAt returns the old "ends_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldEndsAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndsAt: %w", err) + } + return oldValue.EndsAt, nil +} + +// ClearEndsAt clears the value of the "ends_at" field. +func (m *AnnouncementMutation) ClearEndsAt() { + m.ends_at = nil + m.clearedFields[announcement.FieldEndsAt] = struct{}{} +} + +// EndsAtCleared returns if the "ends_at" field was cleared in this mutation. +func (m *AnnouncementMutation) EndsAtCleared() bool { + _, ok := m.clearedFields[announcement.FieldEndsAt] + return ok +} + +// ResetEndsAt resets all changes to the "ends_at" field. +func (m *AnnouncementMutation) ResetEndsAt() { + m.ends_at = nil + delete(m.clearedFields, announcement.FieldEndsAt) +} + +// SetCreatedBy sets the "created_by" field. +func (m *AnnouncementMutation) SetCreatedBy(i int64) { + m.created_by = &i + m.addcreated_by = nil +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *AnnouncementMutation) CreatedBy() (r int64, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
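Because starts_at and ends_at are nullable, the builders pair every Set* with a Clear*; clearing both turns a scheduled announcement into an evergreen one. A small sketch of that workflow (the helper and its name are illustrative, not part of the diff):

// scheduleWindow pins an announcement to a display window, or removes
// the window entirely when clear is true.
func scheduleWindow(ctx context.Context, client *ent.Client, id int64, start, end time.Time, clear bool) error {
    upd := client.Announcement.UpdateOneID(id)
    if clear {
        upd.ClearStartsAt().ClearEndsAt()
    } else {
        upd.SetStartsAt(start).SetEndsAt(end)
    }
    _, err := upd.Save(ctx)
    return err
}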
+func (m *AnnouncementMutation) OldCreatedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// AddCreatedBy adds i to the "created_by" field. +func (m *AnnouncementMutation) AddCreatedBy(i int64) { + if m.addcreated_by != nil { + *m.addcreated_by += i + } else { + m.addcreated_by = &i + } +} + +// AddedCreatedBy returns the value that was added to the "created_by" field in this mutation. +func (m *AnnouncementMutation) AddedCreatedBy() (r int64, exists bool) { + v := m.addcreated_by + if v == nil { + return + } + return *v, true +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (m *AnnouncementMutation) ClearCreatedBy() { + m.created_by = nil + m.addcreated_by = nil + m.clearedFields[announcement.FieldCreatedBy] = struct{}{} +} + +// CreatedByCleared returns if the "created_by" field was cleared in this mutation. +func (m *AnnouncementMutation) CreatedByCleared() bool { + _, ok := m.clearedFields[announcement.FieldCreatedBy] + return ok +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *AnnouncementMutation) ResetCreatedBy() { + m.created_by = nil + m.addcreated_by = nil + delete(m.clearedFields, announcement.FieldCreatedBy) +} + +// SetUpdatedBy sets the "updated_by" field. +func (m *AnnouncementMutation) SetUpdatedBy(i int64) { + m.updated_by = &i + m.addupdated_by = nil +} + +// UpdatedBy returns the value of the "updated_by" field in the mutation. +func (m *AnnouncementMutation) UpdatedBy() (r int64, exists bool) { + v := m.updated_by + if v == nil { + return + } + return *v, true +} + +// OldUpdatedBy returns the old "updated_by" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldUpdatedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedBy: %w", err) + } + return oldValue.UpdatedBy, nil +} + +// AddUpdatedBy adds i to the "updated_by" field. +func (m *AnnouncementMutation) AddUpdatedBy(i int64) { + if m.addupdated_by != nil { + *m.addupdated_by += i + } else { + m.addupdated_by = &i + } +} + +// AddedUpdatedBy returns the value that was added to the "updated_by" field in this mutation. +func (m *AnnouncementMutation) AddedUpdatedBy() (r int64, exists bool) { + v := m.addupdated_by + if v == nil { + return + } + return *v, true +} + +// ClearUpdatedBy clears the value of the "updated_by" field. +func (m *AnnouncementMutation) ClearUpdatedBy() { + m.updated_by = nil + m.addupdated_by = nil + m.clearedFields[announcement.FieldUpdatedBy] = struct{}{} +} + +// UpdatedByCleared returns if the "updated_by" field was cleared in this mutation. 
+func (m *AnnouncementMutation) UpdatedByCleared() bool { + _, ok := m.clearedFields[announcement.FieldUpdatedBy] + return ok +} + +// ResetUpdatedBy resets all changes to the "updated_by" field. +func (m *AnnouncementMutation) ResetUpdatedBy() { + m.updated_by = nil + m.addupdated_by = nil + delete(m.clearedFields, announcement.FieldUpdatedBy) +} + +// SetCreatedAt sets the "created_at" field. +func (m *AnnouncementMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AnnouncementMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AnnouncementMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AnnouncementMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AnnouncementMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Announcement entity. +// If the Announcement object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AnnouncementMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by ids. +func (m *AnnouncementMutation) AddReadIDs(ids ...int64) { + if m.reads == nil { + m.reads = make(map[int64]struct{}) + } + for i := range ids { + m.reads[ids[i]] = struct{}{} + } +} + +// ClearReads clears the "reads" edge to the AnnouncementRead entity. +func (m *AnnouncementMutation) ClearReads() { + m.clearedreads = true +} + +// ReadsCleared reports if the "reads" edge to the AnnouncementRead entity was cleared. 
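The reads edge supports the two lookups a notification UI needs: total reads per announcement and whether a given user has read it. A sketch using the edge query; QueryReads and the announcementread predicate are the names ent conventionally generates for this edge, so verify them against the actual client:

// readStats returns how many reads ann has and whether userID is among them.
func readStats(ctx context.Context, client *ent.Client, ann *ent.Announcement, userID int64) (int, bool, error) {
    total, err := client.Announcement.QueryReads(ann).Count(ctx)
    if err != nil {
        return 0, false, err
    }
    seen, err := client.Announcement.QueryReads(ann).
        Where(announcementread.UserID(userID)).
        Exist(ctx)
    return total, seen, err
}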
+func (m *AnnouncementMutation) ReadsCleared() bool { + return m.clearedreads +} + +// RemoveReadIDs removes the "reads" edge to the AnnouncementRead entity by IDs. +func (m *AnnouncementMutation) RemoveReadIDs(ids ...int64) { + if m.removedreads == nil { + m.removedreads = make(map[int64]struct{}) + } + for i := range ids { + delete(m.reads, ids[i]) + m.removedreads[ids[i]] = struct{}{} + } +} + +// RemovedReads returns the removed IDs of the "reads" edge to the AnnouncementRead entity. +func (m *AnnouncementMutation) RemovedReadsIDs() (ids []int64) { + for id := range m.removedreads { + ids = append(ids, id) + } + return +} + +// ReadsIDs returns the "reads" edge IDs in the mutation. +func (m *AnnouncementMutation) ReadsIDs() (ids []int64) { + for id := range m.reads { + ids = append(ids, id) + } + return +} + +// ResetReads resets all changes to the "reads" edge. +func (m *AnnouncementMutation) ResetReads() { + m.reads = nil + m.clearedreads = false + m.removedreads = nil +} + +// Where appends a list predicates to the AnnouncementMutation builder. +func (m *AnnouncementMutation) Where(ps ...predicate.Announcement) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AnnouncementMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AnnouncementMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Announcement, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AnnouncementMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AnnouncementMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Announcement). +func (m *AnnouncementMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AnnouncementMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.title != nil { + fields = append(fields, announcement.FieldTitle) + } + if m.content != nil { + fields = append(fields, announcement.FieldContent) + } + if m.status != nil { + fields = append(fields, announcement.FieldStatus) + } + if m.targeting != nil { + fields = append(fields, announcement.FieldTargeting) + } + if m.starts_at != nil { + fields = append(fields, announcement.FieldStartsAt) + } + if m.ends_at != nil { + fields = append(fields, announcement.FieldEndsAt) + } + if m.created_by != nil { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.updated_by != nil { + fields = append(fields, announcement.FieldUpdatedBy) + } + if m.created_at != nil { + fields = append(fields, announcement.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, announcement.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
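Where and WhereP let hooks narrow bulk mutations after the fact, which is the usual way to fence mass updates and deletes. A sketch that keeps bulk operations away from archived rows; "archived" is an assumed status value and sql here is entgo.io/ent/dialect/sql:

client.Announcement.Use(func(next ent.Mutator) ent.Mutator {
    return hook.AnnouncementFunc(func(ctx context.Context, m *ent.AnnouncementMutation) (ent.Value, error) {
        // OpUpdate/OpDelete are the bulk variants; the *One ops already target a single row.
        if m.Op().Is(ent.OpUpdate | ent.OpDelete) {
            m.WhereP(sql.FieldNEQ(announcement.FieldStatus, "archived"))
        }
        return next.Mutate(ctx, m)
    })
})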
+func (m *AnnouncementMutation) Field(name string) (ent.Value, bool) { + switch name { + case announcement.FieldTitle: + return m.Title() + case announcement.FieldContent: + return m.Content() + case announcement.FieldStatus: + return m.Status() + case announcement.FieldTargeting: + return m.Targeting() + case announcement.FieldStartsAt: + return m.StartsAt() + case announcement.FieldEndsAt: + return m.EndsAt() + case announcement.FieldCreatedBy: + return m.CreatedBy() + case announcement.FieldUpdatedBy: + return m.UpdatedBy() + case announcement.FieldCreatedAt: + return m.CreatedAt() + case announcement.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AnnouncementMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case announcement.FieldTitle: + return m.OldTitle(ctx) + case announcement.FieldContent: + return m.OldContent(ctx) + case announcement.FieldStatus: + return m.OldStatus(ctx) + case announcement.FieldTargeting: + return m.OldTargeting(ctx) + case announcement.FieldStartsAt: + return m.OldStartsAt(ctx) + case announcement.FieldEndsAt: + return m.OldEndsAt(ctx) + case announcement.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case announcement.FieldUpdatedBy: + return m.OldUpdatedBy(ctx) + case announcement.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case announcement.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Announcement field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AnnouncementMutation) SetField(name string, value ent.Value) error { + switch name { + case announcement.FieldTitle: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTitle(v) + return nil + case announcement.FieldContent: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetContent(v) + return nil + case announcement.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case announcement.FieldTargeting: + v, ok := value.(domain.AnnouncementTargeting) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTargeting(v) + return nil + case announcement.FieldStartsAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartsAt(v) + return nil + case announcement.FieldEndsAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndsAt(v) + return nil + case announcement.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case announcement.FieldUpdatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedBy(v) + return nil + case announcement.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case announcement.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Announcement field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AnnouncementMutation) AddedFields() []string { + var fields []string + if m.addcreated_by != nil { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.addupdated_by != nil { + fields = append(fields, announcement.FieldUpdatedBy) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AnnouncementMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case announcement.FieldCreatedBy: + return m.AddedCreatedBy() + case announcement.FieldUpdatedBy: + return m.AddedUpdatedBy() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
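Field and SetField expose the mutation generically, so cross-cutting hooks can touch well-known columns without the typed setters. A sketch that trims whitespace from any staged title value; the hook is illustrative and, registered with client.Use(trimTitle), would run for every schema that has a title field:

// trimTitle normalizes the title field on any mutation that sets one.
func trimTitle(next ent.Mutator) ent.Mutator {
    return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
        if v, ok := m.Field("title"); ok {
            if s, ok := v.(string); ok {
                if err := m.SetField("title", strings.TrimSpace(s)); err != nil {
                    return nil, err
                }
            }
        }
        return next.Mutate(ctx, m)
    })
}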
+func (m *AnnouncementMutation) AddField(name string, value ent.Value) error { + switch name { + case announcement.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCreatedBy(v) + return nil + case announcement.FieldUpdatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddUpdatedBy(v) + return nil + } + return fmt.Errorf("unknown Announcement numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AnnouncementMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(announcement.FieldTargeting) { + fields = append(fields, announcement.FieldTargeting) + } + if m.FieldCleared(announcement.FieldStartsAt) { + fields = append(fields, announcement.FieldStartsAt) + } + if m.FieldCleared(announcement.FieldEndsAt) { + fields = append(fields, announcement.FieldEndsAt) + } + if m.FieldCleared(announcement.FieldCreatedBy) { + fields = append(fields, announcement.FieldCreatedBy) + } + if m.FieldCleared(announcement.FieldUpdatedBy) { + fields = append(fields, announcement.FieldUpdatedBy) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AnnouncementMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AnnouncementMutation) ClearField(name string) error { + switch name { + case announcement.FieldTargeting: + m.ClearTargeting() + return nil + case announcement.FieldStartsAt: + m.ClearStartsAt() + return nil + case announcement.FieldEndsAt: + m.ClearEndsAt() + return nil + case announcement.FieldCreatedBy: + m.ClearCreatedBy() + return nil + case announcement.FieldUpdatedBy: + m.ClearUpdatedBy() + return nil + } + return fmt.Errorf("unknown Announcement nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AnnouncementMutation) ResetField(name string) error { + switch name { + case announcement.FieldTitle: + m.ResetTitle() + return nil + case announcement.FieldContent: + m.ResetContent() + return nil + case announcement.FieldStatus: + m.ResetStatus() + return nil + case announcement.FieldTargeting: + m.ResetTargeting() + return nil + case announcement.FieldStartsAt: + m.ResetStartsAt() + return nil + case announcement.FieldEndsAt: + m.ResetEndsAt() + return nil + case announcement.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case announcement.FieldUpdatedBy: + m.ResetUpdatedBy() + return nil + case announcement.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case announcement.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Announcement field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AnnouncementMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.reads != nil { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *AnnouncementMutation) AddedIDs(name string) []ent.Value { + switch name { + case announcement.EdgeReads: + ids := make([]ent.Value, 0, len(m.reads)) + for id := range m.reads { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AnnouncementMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedreads != nil { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AnnouncementMutation) RemovedIDs(name string) []ent.Value { + switch name { + case announcement.EdgeReads: + ids := make([]ent.Value, 0, len(m.removedreads)) + for id := range m.removedreads { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AnnouncementMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedreads { + edges = append(edges, announcement.EdgeReads) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AnnouncementMutation) EdgeCleared(name string) bool { + switch name { + case announcement.EdgeReads: + return m.clearedreads + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AnnouncementMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Announcement unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AnnouncementMutation) ResetEdge(name string) error { + switch name { + case announcement.EdgeReads: + m.ResetReads() + return nil + } + return fmt.Errorf("unknown Announcement edge %s", name) +} + +// AnnouncementReadMutation represents an operation that mutates the AnnouncementRead nodes in the graph. +type AnnouncementReadMutation struct { + config + op Op + typ string + id *int64 + read_at *time.Time + created_at *time.Time + clearedFields map[string]struct{} + announcement *int64 + clearedannouncement bool + user *int64 + cleareduser bool + done bool + oldValue func(context.Context) (*AnnouncementRead, error) + predicates []predicate.AnnouncementRead +} + +var _ ent.Mutation = (*AnnouncementReadMutation)(nil) + +// announcementreadOption allows management of the mutation configuration using functional options. +type announcementreadOption func(*AnnouncementReadMutation) + +// newAnnouncementReadMutation creates new mutation for the AnnouncementRead entity. +func newAnnouncementReadMutation(c config, op Op, opts ...announcementreadOption) *AnnouncementReadMutation { + m := &AnnouncementReadMutation{ + config: c, + op: op, + typ: TypeAnnouncementRead, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAnnouncementReadID sets the ID field of the mutation. 
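AddedEdges, RemovedEdges and their ID accessors describe graph changes without any type-specific code, which is enough for a lightweight audit trail. A sketch; the log sink and client.Use registration are assumptions:

// logEdgeChanges reports edge additions and removals for every mutation type.
func logEdgeChanges(next ent.Mutator) ent.Mutator {
    return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
        for _, e := range m.AddedEdges() {
            log.Printf("%s: edge %q gained %d node(s)", m.Type(), e, len(m.AddedIDs(e)))
        }
        for _, e := range m.RemovedEdges() {
            log.Printf("%s: edge %q lost %d node(s)", m.Type(), e, len(m.RemovedIDs(e)))
        }
        return next.Mutate(ctx, m)
    })
}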
+func withAnnouncementReadID(id int64) announcementreadOption { + return func(m *AnnouncementReadMutation) { + var ( + err error + once sync.Once + value *AnnouncementRead + ) + m.oldValue = func(ctx context.Context) (*AnnouncementRead, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().AnnouncementRead.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAnnouncementRead sets the old AnnouncementRead of the mutation. +func withAnnouncementRead(node *AnnouncementRead) announcementreadOption { + return func(m *AnnouncementReadMutation) { + m.oldValue = func(context.Context) (*AnnouncementRead, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AnnouncementReadMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AnnouncementReadMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AnnouncementReadMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AnnouncementReadMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().AnnouncementRead.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetAnnouncementID sets the "announcement_id" field. +func (m *AnnouncementReadMutation) SetAnnouncementID(i int64) { + m.announcement = &i +} + +// AnnouncementID returns the value of the "announcement_id" field in the mutation. +func (m *AnnouncementReadMutation) AnnouncementID() (r int64, exists bool) { + v := m.announcement + if v == nil { + return + } + return *v, true +} + +// OldAnnouncementID returns the old "announcement_id" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AnnouncementReadMutation) OldAnnouncementID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAnnouncementID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAnnouncementID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAnnouncementID: %w", err) + } + return oldValue.AnnouncementID, nil +} + +// ResetAnnouncementID resets all changes to the "announcement_id" field. +func (m *AnnouncementReadMutation) ResetAnnouncementID() { + m.announcement = nil +} + +// SetUserID sets the "user_id" field. +func (m *AnnouncementReadMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *AnnouncementReadMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *AnnouncementReadMutation) ResetUserID() { + m.user = nil +} + +// SetReadAt sets the "read_at" field. +func (m *AnnouncementReadMutation) SetReadAt(t time.Time) { + m.read_at = &t +} + +// ReadAt returns the value of the "read_at" field in the mutation. +func (m *AnnouncementReadMutation) ReadAt() (r time.Time, exists bool) { + v := m.read_at + if v == nil { + return + } + return *v, true +} + +// OldReadAt returns the old "read_at" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldReadAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldReadAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldReadAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldReadAt: %w", err) + } + return oldValue.ReadAt, nil +} + +// ResetReadAt resets all changes to the "read_at" field. +func (m *AnnouncementReadMutation) ResetReadAt() { + m.read_at = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *AnnouncementReadMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
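Together with the unique (announcement_id, user_id) index from the schema, these setters make read-marking naturally idempotent: insert unconditionally and treat a constraint violation as "already read". A sketch under that assumption:

// markRead records that userID has seen annID. A duplicate insert trips the
// unique announcement_id/user_id index and is swallowed as success.
func markRead(ctx context.Context, client *ent.Client, annID, userID int64) error {
    _, err := client.AnnouncementRead.Create().
        SetAnnouncementID(annID).
        SetUserID(userID).
        SetReadAt(time.Now()).
        Save(ctx)
    if err != nil && !ent.IsConstraintError(err) {
        return err
    }
    return nil
}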
+func (m *AnnouncementReadMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the AnnouncementRead entity. +// If the AnnouncementRead object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AnnouncementReadMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AnnouncementReadMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearAnnouncement clears the "announcement" edge to the Announcement entity. +func (m *AnnouncementReadMutation) ClearAnnouncement() { + m.clearedannouncement = true + m.clearedFields[announcementread.FieldAnnouncementID] = struct{}{} +} + +// AnnouncementCleared reports if the "announcement" edge to the Announcement entity was cleared. +func (m *AnnouncementReadMutation) AnnouncementCleared() bool { + return m.clearedannouncement +} + +// AnnouncementIDs returns the "announcement" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AnnouncementID instead. It exists only for internal usage by the builders. +func (m *AnnouncementReadMutation) AnnouncementIDs() (ids []int64) { + if id := m.announcement; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAnnouncement resets all changes to the "announcement" edge. +func (m *AnnouncementReadMutation) ResetAnnouncement() { + m.announcement = nil + m.clearedannouncement = false +} + +// ClearUser clears the "user" edge to the User entity. +func (m *AnnouncementReadMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[announcementread.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *AnnouncementReadMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *AnnouncementReadMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *AnnouncementReadMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the AnnouncementReadMutation builder. +func (m *AnnouncementReadMutation) Where(ps ...predicate.AnnouncementRead) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AnnouncementReadMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AnnouncementReadMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AnnouncementRead, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) 
+} + +// Op returns the operation name. +func (m *AnnouncementReadMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AnnouncementReadMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AnnouncementRead). +func (m *AnnouncementReadMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AnnouncementReadMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.announcement != nil { + fields = append(fields, announcementread.FieldAnnouncementID) + } + if m.user != nil { + fields = append(fields, announcementread.FieldUserID) + } + if m.read_at != nil { + fields = append(fields, announcementread.FieldReadAt) + } + if m.created_at != nil { + fields = append(fields, announcementread.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AnnouncementReadMutation) Field(name string) (ent.Value, bool) { + switch name { + case announcementread.FieldAnnouncementID: + return m.AnnouncementID() + case announcementread.FieldUserID: + return m.UserID() + case announcementread.FieldReadAt: + return m.ReadAt() + case announcementread.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AnnouncementReadMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case announcementread.FieldAnnouncementID: + return m.OldAnnouncementID(ctx) + case announcementread.FieldUserID: + return m.OldUserID(ctx) + case announcementread.FieldReadAt: + return m.OldReadAt(ctx) + case announcementread.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AnnouncementReadMutation) SetField(name string, value ent.Value) error { + switch name { + case announcementread.FieldAnnouncementID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAnnouncementID(v) + return nil + case announcementread.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case announcementread.FieldReadAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetReadAt(v) + return nil + case announcementread.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *AnnouncementReadMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AnnouncementReadMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AnnouncementReadMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown AnnouncementRead numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AnnouncementReadMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AnnouncementReadMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AnnouncementReadMutation) ClearField(name string) error { + return fmt.Errorf("unknown AnnouncementRead nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AnnouncementReadMutation) ResetField(name string) error { + switch name { + case announcementread.FieldAnnouncementID: + m.ResetAnnouncementID() + return nil + case announcementread.FieldUserID: + m.ResetUserID() + return nil + case announcementread.FieldReadAt: + m.ResetReadAt() + return nil + case announcementread.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown AnnouncementRead field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AnnouncementReadMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.announcement != nil { + edges = append(edges, announcementread.EdgeAnnouncement) + } + if m.user != nil { + edges = append(edges, announcementread.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AnnouncementReadMutation) AddedIDs(name string) []ent.Value { + switch name { + case announcementread.EdgeAnnouncement: + if id := m.announcement; id != nil { + return []ent.Value{*id} + } + case announcementread.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AnnouncementReadMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AnnouncementReadMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *AnnouncementReadMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedannouncement { + edges = append(edges, announcementread.EdgeAnnouncement) + } + if m.cleareduser { + edges = append(edges, announcementread.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AnnouncementReadMutation) EdgeCleared(name string) bool { + switch name { + case announcementread.EdgeAnnouncement: + return m.clearedannouncement + case announcementread.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AnnouncementReadMutation) ClearEdge(name string) error { + switch name { + case announcementread.EdgeAnnouncement: + m.ClearAnnouncement() + return nil + case announcementread.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown AnnouncementRead unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AnnouncementReadMutation) ResetEdge(name string) error { + switch name { + case announcementread.EdgeAnnouncement: + m.ResetAnnouncement() + return nil + case announcementread.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown AnnouncementRead edge %s", name) +} + // GroupMutation represents an operation that mutates the Group nodes in the graph. type GroupMutation struct { config @@ -10531,6 +12204,1089 @@ func (m *SettingMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Setting edge %s", name) } +// UsageCleanupTaskMutation represents an operation that mutates the UsageCleanupTask nodes in the graph. +type UsageCleanupTaskMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + status *string + filters *json.RawMessage + appendfilters json.RawMessage + created_by *int64 + addcreated_by *int64 + deleted_rows *int64 + adddeleted_rows *int64 + error_message *string + canceled_by *int64 + addcanceled_by *int64 + canceled_at *time.Time + started_at *time.Time + finished_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*UsageCleanupTask, error) + predicates []predicate.UsageCleanupTask +} + +var _ ent.Mutation = (*UsageCleanupTaskMutation)(nil) + +// usagecleanuptaskOption allows management of the mutation configuration using functional options. +type usagecleanuptaskOption func(*UsageCleanupTaskMutation) + +// newUsageCleanupTaskMutation creates new mutation for the UsageCleanupTask entity. +func newUsageCleanupTaskMutation(c config, op Op, opts ...usagecleanuptaskOption) *UsageCleanupTaskMutation { + m := &UsageCleanupTaskMutation{ + config: c, + op: op, + typ: TypeUsageCleanupTask, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUsageCleanupTaskID sets the ID field of the mutation. 
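A typical producer for UsageCleanupTask serializes the admin's filter selection into the filters JSON column and inserts the row in a pending state for a worker to claim. A sketch; the "pending" status value and the shape of filters are assumptions, only the column types come from this diff:

// enqueueCleanup stores a cleanup request for a background worker.
func enqueueCleanup(ctx context.Context, client *ent.Client, adminID int64, filters any) (*ent.UsageCleanupTask, error) {
    raw, err := json.Marshal(filters)
    if err != nil {
        return nil, err
    }
    return client.UsageCleanupTask.Create().
        SetStatus("pending"). // assumed initial state
        SetFilters(raw).      // json.RawMessage shares the []byte underlying type
        SetCreatedBy(adminID).
        Save(ctx)
}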
+func withUsageCleanupTaskID(id int64) usagecleanuptaskOption {
+	return func(m *UsageCleanupTaskMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *UsageCleanupTask
+		)
+		m.oldValue = func(ctx context.Context) (*UsageCleanupTask, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().UsageCleanupTask.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withUsageCleanupTask sets the old UsageCleanupTask of the mutation.
+func withUsageCleanupTask(node *UsageCleanupTask) usagecleanuptaskOption {
+	return func(m *UsageCleanupTaskMutation) {
+		m.oldValue = func(context.Context) (*UsageCleanupTask, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m UsageCleanupTaskMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m UsageCleanupTaskMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *UsageCleanupTaskMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UsageCleanupTaskMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().UsageCleanupTask.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *UsageCleanupTaskMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *UsageCleanupTaskMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the UsageCleanupTask entity.
+// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UsageCleanupTaskMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UsageCleanupTaskMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UsageCleanupTaskMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UsageCleanupTaskMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UsageCleanupTaskMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetStatus sets the "status" field. +func (m *UsageCleanupTaskMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UsageCleanupTaskMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UsageCleanupTaskMutation) ResetStatus() { + m.status = nil +} + +// SetFilters sets the "filters" field. +func (m *UsageCleanupTaskMutation) SetFilters(jm json.RawMessage) { + m.filters = &jm + m.appendfilters = nil +} + +// Filters returns the value of the "filters" field in the mutation. 
+func (m *UsageCleanupTaskMutation) Filters() (r json.RawMessage, exists bool) { + v := m.filters + if v == nil { + return + } + return *v, true +} + +// OldFilters returns the old "filters" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldFilters(ctx context.Context) (v json.RawMessage, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFilters is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFilters requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFilters: %w", err) + } + return oldValue.Filters, nil +} + +// AppendFilters adds jm to the "filters" field. +func (m *UsageCleanupTaskMutation) AppendFilters(jm json.RawMessage) { + m.appendfilters = append(m.appendfilters, jm...) +} + +// AppendedFilters returns the list of values that were appended to the "filters" field in this mutation. +func (m *UsageCleanupTaskMutation) AppendedFilters() (json.RawMessage, bool) { + if len(m.appendfilters) == 0 { + return nil, false + } + return m.appendfilters, true +} + +// ResetFilters resets all changes to the "filters" field. +func (m *UsageCleanupTaskMutation) ResetFilters() { + m.filters = nil + m.appendfilters = nil +} + +// SetCreatedBy sets the "created_by" field. +func (m *UsageCleanupTaskMutation) SetCreatedBy(i int64) { + m.created_by = &i + m.addcreated_by = nil +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *UsageCleanupTaskMutation) CreatedBy() (r int64, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCreatedBy(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// AddCreatedBy adds i to the "created_by" field. +func (m *UsageCleanupTaskMutation) AddCreatedBy(i int64) { + if m.addcreated_by != nil { + *m.addcreated_by += i + } else { + m.addcreated_by = &i + } +} + +// AddedCreatedBy returns the value that was added to the "created_by" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedCreatedBy() (r int64, exists bool) { + v := m.addcreated_by + if v == nil { + return + } + return *v, true +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *UsageCleanupTaskMutation) ResetCreatedBy() { + m.created_by = nil + m.addcreated_by = nil +} + +// SetDeletedRows sets the "deleted_rows" field. 
+func (m *UsageCleanupTaskMutation) SetDeletedRows(i int64) { + m.deleted_rows = &i + m.adddeleted_rows = nil +} + +// DeletedRows returns the value of the "deleted_rows" field in the mutation. +func (m *UsageCleanupTaskMutation) DeletedRows() (r int64, exists bool) { + v := m.deleted_rows + if v == nil { + return + } + return *v, true +} + +// OldDeletedRows returns the old "deleted_rows" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldDeletedRows(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedRows is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedRows requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedRows: %w", err) + } + return oldValue.DeletedRows, nil +} + +// AddDeletedRows adds i to the "deleted_rows" field. +func (m *UsageCleanupTaskMutation) AddDeletedRows(i int64) { + if m.adddeleted_rows != nil { + *m.adddeleted_rows += i + } else { + m.adddeleted_rows = &i + } +} + +// AddedDeletedRows returns the value that was added to the "deleted_rows" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedDeletedRows() (r int64, exists bool) { + v := m.adddeleted_rows + if v == nil { + return + } + return *v, true +} + +// ResetDeletedRows resets all changes to the "deleted_rows" field. +func (m *UsageCleanupTaskMutation) ResetDeletedRows() { + m.deleted_rows = nil + m.adddeleted_rows = nil +} + +// SetErrorMessage sets the "error_message" field. +func (m *UsageCleanupTaskMutation) SetErrorMessage(s string) { + m.error_message = &s +} + +// ErrorMessage returns the value of the "error_message" field in the mutation. +func (m *UsageCleanupTaskMutation) ErrorMessage() (r string, exists bool) { + v := m.error_message + if v == nil { + return + } + return *v, true +} + +// OldErrorMessage returns the old "error_message" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldErrorMessage(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) + } + return oldValue.ErrorMessage, nil +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (m *UsageCleanupTaskMutation) ClearErrorMessage() { + m.error_message = nil + m.clearedFields[usagecleanuptask.FieldErrorMessage] = struct{}{} +} + +// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) ErrorMessageCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldErrorMessage] + return ok +} + +// ResetErrorMessage resets all changes to the "error_message" field. 
+func (m *UsageCleanupTaskMutation) ResetErrorMessage() { + m.error_message = nil + delete(m.clearedFields, usagecleanuptask.FieldErrorMessage) +} + +// SetCanceledBy sets the "canceled_by" field. +func (m *UsageCleanupTaskMutation) SetCanceledBy(i int64) { + m.canceled_by = &i + m.addcanceled_by = nil +} + +// CanceledBy returns the value of the "canceled_by" field in the mutation. +func (m *UsageCleanupTaskMutation) CanceledBy() (r int64, exists bool) { + v := m.canceled_by + if v == nil { + return + } + return *v, true +} + +// OldCanceledBy returns the old "canceled_by" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldCanceledBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCanceledBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCanceledBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCanceledBy: %w", err) + } + return oldValue.CanceledBy, nil +} + +// AddCanceledBy adds i to the "canceled_by" field. +func (m *UsageCleanupTaskMutation) AddCanceledBy(i int64) { + if m.addcanceled_by != nil { + *m.addcanceled_by += i + } else { + m.addcanceled_by = &i + } +} + +// AddedCanceledBy returns the value that was added to the "canceled_by" field in this mutation. +func (m *UsageCleanupTaskMutation) AddedCanceledBy() (r int64, exists bool) { + v := m.addcanceled_by + if v == nil { + return + } + return *v, true +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (m *UsageCleanupTaskMutation) ClearCanceledBy() { + m.canceled_by = nil + m.addcanceled_by = nil + m.clearedFields[usagecleanuptask.FieldCanceledBy] = struct{}{} +} + +// CanceledByCleared returns if the "canceled_by" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) CanceledByCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldCanceledBy] + return ok +} + +// ResetCanceledBy resets all changes to the "canceled_by" field. +func (m *UsageCleanupTaskMutation) ResetCanceledBy() { + m.canceled_by = nil + m.addcanceled_by = nil + delete(m.clearedFields, usagecleanuptask.FieldCanceledBy) +} + +// SetCanceledAt sets the "canceled_at" field. +func (m *UsageCleanupTaskMutation) SetCanceledAt(t time.Time) { + m.canceled_at = &t +} + +// CanceledAt returns the value of the "canceled_at" field in the mutation. +func (m *UsageCleanupTaskMutation) CanceledAt() (r time.Time, exists bool) { + v := m.canceled_at + if v == nil { + return + } + return *v, true +} + +// OldCanceledAt returns the old "canceled_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageCleanupTaskMutation) OldCanceledAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCanceledAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCanceledAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCanceledAt: %w", err) + } + return oldValue.CanceledAt, nil +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (m *UsageCleanupTaskMutation) ClearCanceledAt() { + m.canceled_at = nil + m.clearedFields[usagecleanuptask.FieldCanceledAt] = struct{}{} +} + +// CanceledAtCleared returns if the "canceled_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) CanceledAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldCanceledAt] + return ok +} + +// ResetCanceledAt resets all changes to the "canceled_at" field. +func (m *UsageCleanupTaskMutation) ResetCanceledAt() { + m.canceled_at = nil + delete(m.clearedFields, usagecleanuptask.FieldCanceledAt) +} + +// SetStartedAt sets the "started_at" field. +func (m *UsageCleanupTaskMutation) SetStartedAt(t time.Time) { + m.started_at = &t +} + +// StartedAt returns the value of the "started_at" field in the mutation. +func (m *UsageCleanupTaskMutation) StartedAt() (r time.Time, exists bool) { + v := m.started_at + if v == nil { + return + } + return *v, true +} + +// OldStartedAt returns the old "started_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldStartedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) + } + return oldValue.StartedAt, nil +} + +// ClearStartedAt clears the value of the "started_at" field. +func (m *UsageCleanupTaskMutation) ClearStartedAt() { + m.started_at = nil + m.clearedFields[usagecleanuptask.FieldStartedAt] = struct{}{} +} + +// StartedAtCleared returns if the "started_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldStartedAt] + return ok +} + +// ResetStartedAt resets all changes to the "started_at" field. +func (m *UsageCleanupTaskMutation) ResetStartedAt() { + m.started_at = nil + delete(m.clearedFields, usagecleanuptask.FieldStartedAt) +} + +// SetFinishedAt sets the "finished_at" field. +func (m *UsageCleanupTaskMutation) SetFinishedAt(t time.Time) { + m.finished_at = &t +} + +// FinishedAt returns the value of the "finished_at" field in the mutation. +func (m *UsageCleanupTaskMutation) FinishedAt() (r time.Time, exists bool) { + v := m.finished_at + if v == nil { + return + } + return *v, true +} + +// OldFinishedAt returns the old "finished_at" field's value of the UsageCleanupTask entity. +// If the UsageCleanupTask object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageCleanupTaskMutation) OldFinishedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFinishedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFinishedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFinishedAt: %w", err) + } + return oldValue.FinishedAt, nil +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (m *UsageCleanupTaskMutation) ClearFinishedAt() { + m.finished_at = nil + m.clearedFields[usagecleanuptask.FieldFinishedAt] = struct{}{} +} + +// FinishedAtCleared returns if the "finished_at" field was cleared in this mutation. +func (m *UsageCleanupTaskMutation) FinishedAtCleared() bool { + _, ok := m.clearedFields[usagecleanuptask.FieldFinishedAt] + return ok +} + +// ResetFinishedAt resets all changes to the "finished_at" field. +func (m *UsageCleanupTaskMutation) ResetFinishedAt() { + m.finished_at = nil + delete(m.clearedFields, usagecleanuptask.FieldFinishedAt) +} + +// Where appends a list predicates to the UsageCleanupTaskMutation builder. +func (m *UsageCleanupTaskMutation) Where(ps ...predicate.UsageCleanupTask) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UsageCleanupTaskMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UsageCleanupTaskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UsageCleanupTask, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UsageCleanupTaskMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UsageCleanupTaskMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UsageCleanupTask). +func (m *UsageCleanupTaskMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UsageCleanupTaskMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, usagecleanuptask.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, usagecleanuptask.FieldUpdatedAt) + } + if m.status != nil { + fields = append(fields, usagecleanuptask.FieldStatus) + } + if m.filters != nil { + fields = append(fields, usagecleanuptask.FieldFilters) + } + if m.created_by != nil { + fields = append(fields, usagecleanuptask.FieldCreatedBy) + } + if m.deleted_rows != nil { + fields = append(fields, usagecleanuptask.FieldDeletedRows) + } + if m.error_message != nil { + fields = append(fields, usagecleanuptask.FieldErrorMessage) + } + if m.canceled_by != nil { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + if m.canceled_at != nil { + fields = append(fields, usagecleanuptask.FieldCanceledAt) + } + if m.started_at != nil { + fields = append(fields, usagecleanuptask.FieldStartedAt) + } + if m.finished_at != nil { + fields = append(fields, usagecleanuptask.FieldFinishedAt) + } + return fields +} + +// Field returns the value of a field with the given name. 
The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UsageCleanupTaskMutation) Field(name string) (ent.Value, bool) { + switch name { + case usagecleanuptask.FieldCreatedAt: + return m.CreatedAt() + case usagecleanuptask.FieldUpdatedAt: + return m.UpdatedAt() + case usagecleanuptask.FieldStatus: + return m.Status() + case usagecleanuptask.FieldFilters: + return m.Filters() + case usagecleanuptask.FieldCreatedBy: + return m.CreatedBy() + case usagecleanuptask.FieldDeletedRows: + return m.DeletedRows() + case usagecleanuptask.FieldErrorMessage: + return m.ErrorMessage() + case usagecleanuptask.FieldCanceledBy: + return m.CanceledBy() + case usagecleanuptask.FieldCanceledAt: + return m.CanceledAt() + case usagecleanuptask.FieldStartedAt: + return m.StartedAt() + case usagecleanuptask.FieldFinishedAt: + return m.FinishedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UsageCleanupTaskMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case usagecleanuptask.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case usagecleanuptask.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case usagecleanuptask.FieldStatus: + return m.OldStatus(ctx) + case usagecleanuptask.FieldFilters: + return m.OldFilters(ctx) + case usagecleanuptask.FieldCreatedBy: + return m.OldCreatedBy(ctx) + case usagecleanuptask.FieldDeletedRows: + return m.OldDeletedRows(ctx) + case usagecleanuptask.FieldErrorMessage: + return m.OldErrorMessage(ctx) + case usagecleanuptask.FieldCanceledBy: + return m.OldCanceledBy(ctx) + case usagecleanuptask.FieldCanceledAt: + return m.OldCanceledAt(ctx) + case usagecleanuptask.FieldStartedAt: + return m.OldStartedAt(ctx) + case usagecleanuptask.FieldFinishedAt: + return m.OldFinishedAt(ctx) + } + return nil, fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UsageCleanupTaskMutation) SetField(name string, value ent.Value) error { + switch name { + case usagecleanuptask.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case usagecleanuptask.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case usagecleanuptask.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case usagecleanuptask.FieldFilters: + v, ok := value.(json.RawMessage) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFilters(v) + return nil + case usagecleanuptask.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + case usagecleanuptask.FieldDeletedRows: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedRows(v) + return nil + case usagecleanuptask.FieldErrorMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorMessage(v) + return nil + case usagecleanuptask.FieldCanceledBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCanceledBy(v) + return nil + case usagecleanuptask.FieldCanceledAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCanceledAt(v) + return nil + case usagecleanuptask.FieldStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartedAt(v) + return nil + case usagecleanuptask.FieldFinishedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFinishedAt(v) + return nil + } + return fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UsageCleanupTaskMutation) AddedFields() []string { + var fields []string + if m.addcreated_by != nil { + fields = append(fields, usagecleanuptask.FieldCreatedBy) + } + if m.adddeleted_rows != nil { + fields = append(fields, usagecleanuptask.FieldDeletedRows) + } + if m.addcanceled_by != nil { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UsageCleanupTaskMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case usagecleanuptask.FieldCreatedBy: + return m.AddedCreatedBy() + case usagecleanuptask.FieldDeletedRows: + return m.AddedDeletedRows() + case usagecleanuptask.FieldCanceledBy: + return m.AddedCanceledBy() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UsageCleanupTaskMutation) AddField(name string, value ent.Value) error { + switch name { + case usagecleanuptask.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCreatedBy(v) + return nil + case usagecleanuptask.FieldDeletedRows: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDeletedRows(v) + return nil + case usagecleanuptask.FieldCanceledBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCanceledBy(v) + return nil + } + return fmt.Errorf("unknown UsageCleanupTask numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UsageCleanupTaskMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(usagecleanuptask.FieldErrorMessage) { + fields = append(fields, usagecleanuptask.FieldErrorMessage) + } + if m.FieldCleared(usagecleanuptask.FieldCanceledBy) { + fields = append(fields, usagecleanuptask.FieldCanceledBy) + } + if m.FieldCleared(usagecleanuptask.FieldCanceledAt) { + fields = append(fields, usagecleanuptask.FieldCanceledAt) + } + if m.FieldCleared(usagecleanuptask.FieldStartedAt) { + fields = append(fields, usagecleanuptask.FieldStartedAt) + } + if m.FieldCleared(usagecleanuptask.FieldFinishedAt) { + fields = append(fields, usagecleanuptask.FieldFinishedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UsageCleanupTaskMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UsageCleanupTaskMutation) ClearField(name string) error { + switch name { + case usagecleanuptask.FieldErrorMessage: + m.ClearErrorMessage() + return nil + case usagecleanuptask.FieldCanceledBy: + m.ClearCanceledBy() + return nil + case usagecleanuptask.FieldCanceledAt: + m.ClearCanceledAt() + return nil + case usagecleanuptask.FieldStartedAt: + m.ClearStartedAt() + return nil + case usagecleanuptask.FieldFinishedAt: + m.ClearFinishedAt() + return nil + } + return fmt.Errorf("unknown UsageCleanupTask nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *UsageCleanupTaskMutation) ResetField(name string) error { + switch name { + case usagecleanuptask.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case usagecleanuptask.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case usagecleanuptask.FieldStatus: + m.ResetStatus() + return nil + case usagecleanuptask.FieldFilters: + m.ResetFilters() + return nil + case usagecleanuptask.FieldCreatedBy: + m.ResetCreatedBy() + return nil + case usagecleanuptask.FieldDeletedRows: + m.ResetDeletedRows() + return nil + case usagecleanuptask.FieldErrorMessage: + m.ResetErrorMessage() + return nil + case usagecleanuptask.FieldCanceledBy: + m.ResetCanceledBy() + return nil + case usagecleanuptask.FieldCanceledAt: + m.ResetCanceledAt() + return nil + case usagecleanuptask.FieldStartedAt: + m.ResetStartedAt() + return nil + case usagecleanuptask.FieldFinishedAt: + m.ResetFinishedAt() + return nil + } + return fmt.Errorf("unknown UsageCleanupTask field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UsageCleanupTaskMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UsageCleanupTaskMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UsageCleanupTaskMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UsageCleanupTaskMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UsageCleanupTaskMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UsageCleanupTaskMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UsageCleanupTaskMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown UsageCleanupTask unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UsageCleanupTaskMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown UsageCleanupTask edge %s", name) +} + // UsageLogMutation represents an operation that mutates the UsageLog nodes in the graph. 
type UsageLogMutation struct { config @@ -13435,6 +16191,9 @@ type UserMutation struct { status *string username *string notes *string + totp_secret_encrypted *string + totp_enabled *bool + totp_enabled_at *time.Time clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -13448,6 +16207,9 @@ type UserMutation struct { assigned_subscriptions map[int64]struct{} removedassigned_subscriptions map[int64]struct{} clearedassigned_subscriptions bool + announcement_reads map[int64]struct{} + removedannouncement_reads map[int64]struct{} + clearedannouncement_reads bool allowed_groups map[int64]struct{} removedallowed_groups map[int64]struct{} clearedallowed_groups bool @@ -14012,6 +16774,140 @@ func (m *UserMutation) ResetNotes() { m.notes = nil } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (m *UserMutation) SetTotpSecretEncrypted(s string) { + m.totp_secret_encrypted = &s +} + +// TotpSecretEncrypted returns the value of the "totp_secret_encrypted" field in the mutation. +func (m *UserMutation) TotpSecretEncrypted() (r string, exists bool) { + v := m.totp_secret_encrypted + if v == nil { + return + } + return *v, true +} + +// OldTotpSecretEncrypted returns the old "totp_secret_encrypted" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldTotpSecretEncrypted(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpSecretEncrypted is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpSecretEncrypted requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpSecretEncrypted: %w", err) + } + return oldValue.TotpSecretEncrypted, nil +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (m *UserMutation) ClearTotpSecretEncrypted() { + m.totp_secret_encrypted = nil + m.clearedFields[user.FieldTotpSecretEncrypted] = struct{}{} +} + +// TotpSecretEncryptedCleared returns if the "totp_secret_encrypted" field was cleared in this mutation. +func (m *UserMutation) TotpSecretEncryptedCleared() bool { + _, ok := m.clearedFields[user.FieldTotpSecretEncrypted] + return ok +} + +// ResetTotpSecretEncrypted resets all changes to the "totp_secret_encrypted" field. +func (m *UserMutation) ResetTotpSecretEncrypted() { + m.totp_secret_encrypted = nil + delete(m.clearedFields, user.FieldTotpSecretEncrypted) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (m *UserMutation) SetTotpEnabled(b bool) { + m.totp_enabled = &b +} + +// TotpEnabled returns the value of the "totp_enabled" field in the mutation. +func (m *UserMutation) TotpEnabled() (r bool, exists bool) { + v := m.totp_enabled + if v == nil { + return + } + return *v, true +} + +// OldTotpEnabled returns the old "totp_enabled" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldTotpEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpEnabled: %w", err) + } + return oldValue.TotpEnabled, nil +} + +// ResetTotpEnabled resets all changes to the "totp_enabled" field. +func (m *UserMutation) ResetTotpEnabled() { + m.totp_enabled = nil +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (m *UserMutation) SetTotpEnabledAt(t time.Time) { + m.totp_enabled_at = &t +} + +// TotpEnabledAt returns the value of the "totp_enabled_at" field in the mutation. +func (m *UserMutation) TotpEnabledAt() (r time.Time, exists bool) { + v := m.totp_enabled_at + if v == nil { + return + } + return *v, true +} + +// OldTotpEnabledAt returns the old "totp_enabled_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldTotpEnabledAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotpEnabledAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotpEnabledAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotpEnabledAt: %w", err) + } + return oldValue.TotpEnabledAt, nil +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (m *UserMutation) ClearTotpEnabledAt() { + m.totp_enabled_at = nil + m.clearedFields[user.FieldTotpEnabledAt] = struct{}{} +} + +// TotpEnabledAtCleared returns if the "totp_enabled_at" field was cleared in this mutation. +func (m *UserMutation) TotpEnabledAtCleared() bool { + _, ok := m.clearedFields[user.FieldTotpEnabledAt] + return ok +} + +// ResetTotpEnabledAt resets all changes to the "totp_enabled_at" field. +func (m *UserMutation) ResetTotpEnabledAt() { + m.totp_enabled_at = nil + delete(m.clearedFields, user.FieldTotpEnabledAt) +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *UserMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -14228,6 +17124,60 @@ func (m *UserMutation) ResetAssignedSubscriptions() { m.removedassigned_subscriptions = nil } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by ids. +func (m *UserMutation) AddAnnouncementReadIDs(ids ...int64) { + if m.announcement_reads == nil { + m.announcement_reads = make(map[int64]struct{}) + } + for i := range ids { + m.announcement_reads[ids[i]] = struct{}{} + } +} + +// ClearAnnouncementReads clears the "announcement_reads" edge to the AnnouncementRead entity. +func (m *UserMutation) ClearAnnouncementReads() { + m.clearedannouncement_reads = true +} + +// AnnouncementReadsCleared reports if the "announcement_reads" edge to the AnnouncementRead entity was cleared. +func (m *UserMutation) AnnouncementReadsCleared() bool { + return m.clearedannouncement_reads +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to the AnnouncementRead entity by IDs. 
+func (m *UserMutation) RemoveAnnouncementReadIDs(ids ...int64) { + if m.removedannouncement_reads == nil { + m.removedannouncement_reads = make(map[int64]struct{}) + } + for i := range ids { + delete(m.announcement_reads, ids[i]) + m.removedannouncement_reads[ids[i]] = struct{}{} + } +} + +// RemovedAnnouncementReads returns the removed IDs of the "announcement_reads" edge to the AnnouncementRead entity. +func (m *UserMutation) RemovedAnnouncementReadsIDs() (ids []int64) { + for id := range m.removedannouncement_reads { + ids = append(ids, id) + } + return +} + +// AnnouncementReadsIDs returns the "announcement_reads" edge IDs in the mutation. +func (m *UserMutation) AnnouncementReadsIDs() (ids []int64) { + for id := range m.announcement_reads { + ids = append(ids, id) + } + return +} + +// ResetAnnouncementReads resets all changes to the "announcement_reads" edge. +func (m *UserMutation) ResetAnnouncementReads() { + m.announcement_reads = nil + m.clearedannouncement_reads = false + m.removedannouncement_reads = nil +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by ids. func (m *UserMutation) AddAllowedGroupIDs(ids ...int64) { if m.allowed_groups == nil { @@ -14478,7 +17428,7 @@ func (m *UserMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *UserMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 14) if m.created_at != nil { fields = append(fields, user.FieldCreatedAt) } @@ -14512,6 +17462,15 @@ func (m *UserMutation) Fields() []string { if m.notes != nil { fields = append(fields, user.FieldNotes) } + if m.totp_secret_encrypted != nil { + fields = append(fields, user.FieldTotpSecretEncrypted) + } + if m.totp_enabled != nil { + fields = append(fields, user.FieldTotpEnabled) + } + if m.totp_enabled_at != nil { + fields = append(fields, user.FieldTotpEnabledAt) + } return fields } @@ -14542,6 +17501,12 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) { return m.Username() case user.FieldNotes: return m.Notes() + case user.FieldTotpSecretEncrypted: + return m.TotpSecretEncrypted() + case user.FieldTotpEnabled: + return m.TotpEnabled() + case user.FieldTotpEnabledAt: + return m.TotpEnabledAt() } return nil, false } @@ -14573,6 +17538,12 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldUsername(ctx) case user.FieldNotes: return m.OldNotes(ctx) + case user.FieldTotpSecretEncrypted: + return m.OldTotpSecretEncrypted(ctx) + case user.FieldTotpEnabled: + return m.OldTotpEnabled(ctx) + case user.FieldTotpEnabledAt: + return m.OldTotpEnabledAt(ctx) } return nil, fmt.Errorf("unknown User field %s", name) } @@ -14659,6 +17630,27 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetNotes(v) return nil + case user.FieldTotpSecretEncrypted: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpSecretEncrypted(v) + return nil + case user.FieldTotpEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpEnabled(v) + return nil + case user.FieldTotpEnabledAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotpEnabledAt(v) + return nil } return fmt.Errorf("unknown User field %s", name) } @@ -14719,6 +17711,12 @@ func (m *UserMutation) ClearedFields() []string { if 
m.FieldCleared(user.FieldDeletedAt) { fields = append(fields, user.FieldDeletedAt) } + if m.FieldCleared(user.FieldTotpSecretEncrypted) { + fields = append(fields, user.FieldTotpSecretEncrypted) + } + if m.FieldCleared(user.FieldTotpEnabledAt) { + fields = append(fields, user.FieldTotpEnabledAt) + } return fields } @@ -14736,6 +17734,12 @@ func (m *UserMutation) ClearField(name string) error { case user.FieldDeletedAt: m.ClearDeletedAt() return nil + case user.FieldTotpSecretEncrypted: + m.ClearTotpSecretEncrypted() + return nil + case user.FieldTotpEnabledAt: + m.ClearTotpEnabledAt() + return nil } return fmt.Errorf("unknown User nullable field %s", name) } @@ -14777,13 +17781,22 @@ func (m *UserMutation) ResetField(name string) error { case user.FieldNotes: m.ResetNotes() return nil + case user.FieldTotpSecretEncrypted: + m.ResetTotpSecretEncrypted() + return nil + case user.FieldTotpEnabled: + m.ResetTotpEnabled() + return nil + case user.FieldTotpEnabledAt: + m.ResetTotpEnabledAt() + return nil } return fmt.Errorf("unknown User field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. func (m *UserMutation) AddedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.api_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -14796,6 +17809,9 @@ func (m *UserMutation) AddedEdges() []string { if m.assigned_subscriptions != nil { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.announcement_reads != nil { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.allowed_groups != nil { edges = append(edges, user.EdgeAllowedGroups) } @@ -14839,6 +17855,12 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAnnouncementReads: + ids := make([]ent.Value, 0, len(m.announcement_reads)) + for id := range m.announcement_reads { + ids = append(ids, id) + } + return ids case user.EdgeAllowedGroups: ids := make([]ent.Value, 0, len(m.allowed_groups)) for id := range m.allowed_groups { @@ -14869,7 +17891,7 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { // RemovedEdges returns all edge names that were removed in this mutation. func (m *UserMutation) RemovedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.removedapi_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -14882,6 +17904,9 @@ func (m *UserMutation) RemovedEdges() []string { if m.removedassigned_subscriptions != nil { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.removedannouncement_reads != nil { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.removedallowed_groups != nil { edges = append(edges, user.EdgeAllowedGroups) } @@ -14925,6 +17950,12 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAnnouncementReads: + ids := make([]ent.Value, 0, len(m.removedannouncement_reads)) + for id := range m.removedannouncement_reads { + ids = append(ids, id) + } + return ids case user.EdgeAllowedGroups: ids := make([]ent.Value, 0, len(m.removedallowed_groups)) for id := range m.removedallowed_groups { @@ -14955,7 +17986,7 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *UserMutation) ClearedEdges() []string { - edges := make([]string, 0, 8) + edges := make([]string, 0, 9) if m.clearedapi_keys { edges = append(edges, user.EdgeAPIKeys) } @@ -14968,6 +17999,9 @@ func (m *UserMutation) ClearedEdges() []string { if m.clearedassigned_subscriptions { edges = append(edges, user.EdgeAssignedSubscriptions) } + if m.clearedannouncement_reads { + edges = append(edges, user.EdgeAnnouncementReads) + } if m.clearedallowed_groups { edges = append(edges, user.EdgeAllowedGroups) } @@ -14995,6 +18029,8 @@ func (m *UserMutation) EdgeCleared(name string) bool { return m.clearedsubscriptions case user.EdgeAssignedSubscriptions: return m.clearedassigned_subscriptions + case user.EdgeAnnouncementReads: + return m.clearedannouncement_reads case user.EdgeAllowedGroups: return m.clearedallowed_groups case user.EdgeUsageLogs: @@ -15031,6 +18067,9 @@ func (m *UserMutation) ResetEdge(name string) error { case user.EdgeAssignedSubscriptions: m.ResetAssignedSubscriptions() return nil + case user.EdgeAnnouncementReads: + m.ResetAnnouncementReads() + return nil case user.EdgeAllowedGroups: m.ResetAllowedGroups() return nil diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 7a443c5d..613c5913 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -15,6 +15,12 @@ type Account func(*sql.Selector) // AccountGroup is the predicate function for accountgroup builders. type AccountGroup func(*sql.Selector) +// Announcement is the predicate function for announcement builders. +type Announcement func(*sql.Selector) + +// AnnouncementRead is the predicate function for announcementread builders. +type AnnouncementRead func(*sql.Selector) + // Group is the predicate function for group builders. type Group func(*sql.Selector) @@ -33,6 +39,9 @@ type RedeemCode func(*sql.Selector) // Setting is the predicate function for setting builders. type Setting func(*sql.Selector) +// UsageCleanupTask is the predicate function for usagecleanuptask builders. +type UsageCleanupTask func(*sql.Selector) + // UsageLog is the predicate function for usagelog builders. type UsageLog func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index e7f33598..790c1489 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -7,6 +7,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocode" @@ -15,6 +17,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/schema" "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" @@ -209,6 +212,56 @@ func init() { accountgroupDescCreatedAt := accountgroupFields[3].Descriptor() // accountgroup.DefaultCreatedAt holds the default value on creation for the created_at field. accountgroup.DefaultCreatedAt = accountgroupDescCreatedAt.Default.(func() time.Time) + announcementFields := schema.Announcement{}.Fields() + _ = announcementFields + // announcementDescTitle is the schema descriptor for title field. 
+ announcementDescTitle := announcementFields[0].Descriptor() + // announcement.TitleValidator is a validator for the "title" field. It is called by the builders before save. + announcement.TitleValidator = func() func(string) error { + validators := announcementDescTitle.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(title string) error { + for _, fn := range fns { + if err := fn(title); err != nil { + return err + } + } + return nil + } + }() + // announcementDescContent is the schema descriptor for content field. + announcementDescContent := announcementFields[1].Descriptor() + // announcement.ContentValidator is a validator for the "content" field. It is called by the builders before save. + announcement.ContentValidator = announcementDescContent.Validators[0].(func(string) error) + // announcementDescStatus is the schema descriptor for status field. + announcementDescStatus := announcementFields[2].Descriptor() + // announcement.DefaultStatus holds the default value on creation for the status field. + announcement.DefaultStatus = announcementDescStatus.Default.(string) + // announcement.StatusValidator is a validator for the "status" field. It is called by the builders before save. + announcement.StatusValidator = announcementDescStatus.Validators[0].(func(string) error) + // announcementDescCreatedAt is the schema descriptor for created_at field. + announcementDescCreatedAt := announcementFields[8].Descriptor() + // announcement.DefaultCreatedAt holds the default value on creation for the created_at field. + announcement.DefaultCreatedAt = announcementDescCreatedAt.Default.(func() time.Time) + // announcementDescUpdatedAt is the schema descriptor for updated_at field. + announcementDescUpdatedAt := announcementFields[9].Descriptor() + // announcement.DefaultUpdatedAt holds the default value on creation for the updated_at field. + announcement.DefaultUpdatedAt = announcementDescUpdatedAt.Default.(func() time.Time) + // announcement.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + announcement.UpdateDefaultUpdatedAt = announcementDescUpdatedAt.UpdateDefault.(func() time.Time) + announcementreadFields := schema.AnnouncementRead{}.Fields() + _ = announcementreadFields + // announcementreadDescReadAt is the schema descriptor for read_at field. + announcementreadDescReadAt := announcementreadFields[2].Descriptor() + // announcementread.DefaultReadAt holds the default value on creation for the read_at field. + announcementread.DefaultReadAt = announcementreadDescReadAt.Default.(func() time.Time) + // announcementreadDescCreatedAt is the schema descriptor for created_at field. + announcementreadDescCreatedAt := announcementreadFields[3].Descriptor() + // announcementread.DefaultCreatedAt holds the default value on creation for the created_at field. + announcementread.DefaultCreatedAt = announcementreadDescCreatedAt.Default.(func() time.Time) groupMixin := schema.Group{}.Mixin() groupMixinHooks1 := groupMixin[1].Hooks() group.Hooks[0] = groupMixinHooks1[0] @@ -499,6 +552,43 @@ func init() { setting.DefaultUpdatedAt = settingDescUpdatedAt.Default.(func() time.Time) // setting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
setting.UpdateDefaultUpdatedAt = settingDescUpdatedAt.UpdateDefault.(func() time.Time) + usagecleanuptaskMixin := schema.UsageCleanupTask{}.Mixin() + usagecleanuptaskMixinFields0 := usagecleanuptaskMixin[0].Fields() + _ = usagecleanuptaskMixinFields0 + usagecleanuptaskFields := schema.UsageCleanupTask{}.Fields() + _ = usagecleanuptaskFields + // usagecleanuptaskDescCreatedAt is the schema descriptor for created_at field. + usagecleanuptaskDescCreatedAt := usagecleanuptaskMixinFields0[0].Descriptor() + // usagecleanuptask.DefaultCreatedAt holds the default value on creation for the created_at field. + usagecleanuptask.DefaultCreatedAt = usagecleanuptaskDescCreatedAt.Default.(func() time.Time) + // usagecleanuptaskDescUpdatedAt is the schema descriptor for updated_at field. + usagecleanuptaskDescUpdatedAt := usagecleanuptaskMixinFields0[1].Descriptor() + // usagecleanuptask.DefaultUpdatedAt holds the default value on creation for the updated_at field. + usagecleanuptask.DefaultUpdatedAt = usagecleanuptaskDescUpdatedAt.Default.(func() time.Time) + // usagecleanuptask.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + usagecleanuptask.UpdateDefaultUpdatedAt = usagecleanuptaskDescUpdatedAt.UpdateDefault.(func() time.Time) + // usagecleanuptaskDescStatus is the schema descriptor for status field. + usagecleanuptaskDescStatus := usagecleanuptaskFields[0].Descriptor() + // usagecleanuptask.StatusValidator is a validator for the "status" field. It is called by the builders before save. + usagecleanuptask.StatusValidator = func() func(string) error { + validators := usagecleanuptaskDescStatus.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(status string) error { + for _, fn := range fns { + if err := fn(status); err != nil { + return err + } + } + return nil + } + }() + // usagecleanuptaskDescDeletedRows is the schema descriptor for deleted_rows field. + usagecleanuptaskDescDeletedRows := usagecleanuptaskFields[3].Descriptor() + // usagecleanuptask.DefaultDeletedRows holds the default value on creation for the deleted_rows field. + usagecleanuptask.DefaultDeletedRows = usagecleanuptaskDescDeletedRows.Default.(int64) usagelogFields := schema.UsageLog{}.Fields() _ = usagelogFields // usagelogDescRequestID is the schema descriptor for request_id field. @@ -702,6 +792,10 @@ func init() { userDescNotes := userFields[7].Descriptor() // user.DefaultNotes holds the default value on creation for the notes field. user.DefaultNotes = userDescNotes.Default.(string) + // userDescTotpEnabled is the schema descriptor for totp_enabled field. + userDescTotpEnabled := userFields[9].Descriptor() + // user.DefaultTotpEnabled holds the default value on creation for the totp_enabled field. + user.DefaultTotpEnabled = userDescTotpEnabled.Default.(bool) userallowedgroupFields := schema.UserAllowedGroup{}.Fields() _ = userallowedgroupFields // userallowedgroupDescCreatedAt is the schema descriptor for created_at field. 
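For orientation, here is a minimal sketch of how the generated builders added above are typically exercised from service code. Everything outside the generated API is an assumption: the package and helper names, the status strings, and the filter payload are illustrative only; the entity, field, and method names come from the generated code in this patch.

// Sketch only (assumed wiring): driving the UsageCleanupTask and
// AnnouncementRead builders generated in this patch through an ent client.
package example

import (
	"context"
	"encoding/json"
	"time"

	"github.com/Wei-Shaw/sub2api/ent"
)

// runCleanup creates a cleanup task and then marks it as started.
// "pending" and "running" are assumed status values; real values must
// satisfy usagecleanuptask.StatusValidator.
func runCleanup(ctx context.Context, client *ent.Client, adminID int64) error {
	filters := json.RawMessage(`{"before":"2025-01-01T00:00:00Z"}`) // assumed filter shape

	// Create() routes SetStatus/SetFilters/SetCreatedBy through the
	// UsageCleanupTaskMutation defined earlier in this patch.
	task, err := client.UsageCleanupTask.Create().
		SetStatus("pending").
		SetFilters(filters).
		SetCreatedBy(adminID).
		Save(ctx)
	if err != nil {
		return err
	}

	// UpdateOneID issues an UpdateOne mutation, so hooks can read the
	// previous values via the Old* accessors (OldStatus, OldStartedAt, ...).
	return client.UsageCleanupTask.UpdateOneID(task.ID).
		SetStatus("running").
		SetStartedAt(time.Now()).
		Save(ctx)
}

// markRead inserts a first-read row; the unique (announcement_id, user_id)
// index declared in the AnnouncementRead schema below turns a repeat read
// into a constraint error, which is treated as already-read here.
func markRead(ctx context.Context, client *ent.Client, announcementID, userID int64) error {
	err := client.AnnouncementRead.Create().
		SetAnnouncementID(announcementID).
		SetUserID(userID).
		Save(ctx)
	if ent.IsConstraintError(err) {
		return nil
	}
	return err
}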
diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go
index dd79ba96..1cfecc2d 100644
--- a/backend/ent/schema/account.go
+++ b/backend/ent/schema/account.go
@@ -4,7 +4,7 @@ package schema
 import (
 	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -111,7 +111,7 @@ func (Account) Fields() []ent.Field {
 		// status: account status, e.g. "active", "error", "disabled"
 		field.String("status").
 			MaxLen(20).
-			Default(service.StatusActive),
+			Default(domain.StatusActive),
 
 		// error_message: error details recorded when the account enters an abnormal state
 		field.String("error_message").
diff --git a/backend/ent/schema/announcement.go b/backend/ent/schema/announcement.go
new file mode 100644
index 00000000..1568778f
--- /dev/null
+++ b/backend/ent/schema/announcement.go
@@ -0,0 +1,90 @@
+package schema
+
+import (
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/domain"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// Announcement holds the schema definition for the Announcement entity.
+//
+// Deletion policy: hard delete (read records are removed via foreign-key cascade).
+type Announcement struct {
+	ent.Schema
+}
+
+func (Announcement) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "announcements"},
+	}
+}
+
+func (Announcement) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("title").
+			MaxLen(200).
+			NotEmpty().
+			Comment("Announcement title"),
+		field.String("content").
+			SchemaType(map[string]string{dialect.Postgres: "text"}).
+			NotEmpty().
+			Comment("Announcement content (Markdown supported)"),
+		field.String("status").
+			MaxLen(20).
+			Default(domain.AnnouncementStatusDraft).
+			Comment("Status: draft, active, archived"),
+		field.JSON("targeting", domain.AnnouncementTargeting{}).
+			Optional().
+			SchemaType(map[string]string{dialect.Postgres: "jsonb"}).
+			Comment("Display conditions (JSON rules)"),
+		field.Time("starts_at").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}).
+			Comment("Display start time (empty means effective immediately)"),
+		field.Time("ends_at").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}).
+			Comment("Display end time (empty means never expires)"),
+		field.Int64("created_by").
+			Optional().
+			Nillable().
+			Comment("Creator user ID (admin)"),
+		field.Int64("updated_by").
+			Optional().
+			Nillable().
+			Comment("Updater user ID (admin)"),
+		field.Time("created_at").
+			Immutable().
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.Time("updated_at").
+			Default(time.Now).
+			UpdateDefault(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+	}
+}
+
+func (Announcement) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("reads", AnnouncementRead.Type),
+	}
+}
+
+func (Announcement) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("status"),
+		index.Fields("created_at"),
+		index.Fields("starts_at"),
+		index.Fields("ends_at"),
+	}
+}
diff --git a/backend/ent/schema/announcement_read.go b/backend/ent/schema/announcement_read.go
new file mode 100644
index 00000000..e0b50777
--- /dev/null
+++ b/backend/ent/schema/announcement_read.go
@@ -0,0 +1,65 @@
+package schema
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// AnnouncementRead holds the schema definition for the AnnouncementRead entity.
+//
+// Records a user's read status for an announcement (time of first read).
+type AnnouncementRead struct {
+	ent.Schema
+}
+
+func (AnnouncementRead) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "announcement_reads"},
+	}
+}
+
+func (AnnouncementRead) Fields() []ent.Field {
+	return []ent.Field{
+		field.Int64("announcement_id"),
+		field.Int64("user_id"),
+		field.Time("read_at").
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}).
+			Comment("Time the user first read the announcement"),
+		field.Time("created_at").
+			Immutable().
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+	}
+}
+
+func (AnnouncementRead) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("announcement", Announcement.Type).
+			Ref("reads").
+			Field("announcement_id").
+			Unique().
+			Required(),
+		edge.From("user", User.Type).
+			Ref("announcement_reads").
+			Field("user_id").
+			Unique().
+			Required(),
+	}
+}
+
+func (AnnouncementRead) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("announcement_id"),
+		index.Fields("user_id"),
+		index.Fields("read_at"),
+		index.Fields("announcement_id", "user_id").Unique(),
+	}
+}
diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go
index 1b206089..1c2d4bd4 100644
--- a/backend/ent/schema/api_key.go
+++ b/backend/ent/schema/api_key.go
@@ -2,7 +2,7 @@ package schema
 
 import (
 	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect/entsql"
@@ -45,7 +45,7 @@ func (APIKey) Fields() []ent.Field {
 			Nillable(),
 		field.String("status").
 			MaxLen(20).
-			Default(service.StatusActive),
+			Default(domain.StatusActive),
 		field.JSON("ip_whitelist", []string{}).
 			Optional().
 			Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"),
diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go
index 58583752..020f9f57 100644
--- a/backend/ent/schema/group.go
+++ b/backend/ent/schema/group.go
@@ -2,7 +2,7 @@ package schema
 
 import (
 	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -49,15 +49,15 @@ func (Group) Fields() []ent.Field {
 			Default(false),
 		field.String("status").
 			MaxLen(20).
-			Default(service.StatusActive),
+			Default(domain.StatusActive),
 
 		// Subscription-related fields (added by migration 003)
 		field.String("platform").
 			MaxLen(50).
-			Default(service.PlatformAnthropic),
+			Default(domain.PlatformAnthropic),
 		field.String("subscription_type").
 			MaxLen(20).
-			Default(service.SubscriptionTypeStandard),
+			Default(domain.SubscriptionTypeStandard),
 		field.Float("daily_limit_usd").
 			Optional().
 			Nillable(),
diff --git a/backend/ent/schema/mixins/soft_delete.go b/backend/ent/schema/mixins/soft_delete.go
index 9571bc9c..22eded3e 100644
--- a/backend/ent/schema/mixins/soft_delete.go
+++ b/backend/ent/schema/mixins/soft_delete.go
@@ -5,6 +5,7 @@ package mixins
 import (
 	"context"
 	"fmt"
+	"reflect"
 	"time"
 
 	"entgo.io/ent"
@@ -12,7 +13,6 @@ import (
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/schema/field"
 	"entgo.io/ent/schema/mixin"
-	dbent "github.com/Wei-Shaw/sub2api/ent"
 	"github.com/Wei-Shaw/sub2api/ent/intercept"
 )
 
@@ -113,7 +113,6 @@ func (d SoftDeleteMixin) Hooks() []ent.Hook {
 				SetOp(ent.Op)
 				SetDeletedAt(time.Time)
 				WhereP(...func(*sql.Selector))
-				Client() *dbent.Client
 			})
 			if !ok {
 				return nil, fmt.Errorf("unexpected mutation type %T", m)
@@ -124,7 +123,7 @@ func (d SoftDeleteMixin) Hooks() []ent.Hook {
 			mx.SetOp(ent.OpUpdate)
 			// set the deletion time to now
 			mx.SetDeletedAt(time.Now())
-			return mx.Client().Mutate(ctx, m)
+			return mutateWithClient(ctx, m, next)
 		})
 	},
 }
@@ -137,3 +136,41 @@ func (d SoftDeleteMixin) applyPredicate(w interface{ WhereP(...func(*sql.Selecto
 		sql.FieldIsNull(d.Fields()[0].Descriptor().Name),
 	)
 }
+
+func mutateWithClient(ctx context.Context, m ent.Mutation, fallback ent.Mutator) (ent.Value, error) {
+	clientMethod := reflect.ValueOf(m).MethodByName("Client")
+	if !clientMethod.IsValid() || clientMethod.Type().NumIn() != 0 || clientMethod.Type().NumOut() != 1 {
+		return nil, fmt.Errorf("soft delete: mutation client method not found for %T", m)
+	}
+	client := clientMethod.Call(nil)[0]
+	mutateMethod := client.MethodByName("Mutate")
+	if !mutateMethod.IsValid() {
+		return nil, fmt.Errorf("soft delete: mutation client missing Mutate for %T", m)
+	}
+	if mutateMethod.Type().NumIn() != 2 || mutateMethod.Type().NumOut() != 2 {
+		return nil, fmt.Errorf("soft delete: mutation client signature mismatch for %T", m)
+	}
+
+	results := mutateMethod.Call([]reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(m)})
+	value := results[0].Interface()
+	var err error
+	if !results[1].IsNil() {
+		errValue := results[1].Interface()
+		typedErr, ok := errValue.(error)
+		if !ok {
+			return nil, fmt.Errorf("soft delete: unexpected error type %T for %T", errValue, m)
+		}
+		err = typedErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	if value == nil {
+		return nil, fmt.Errorf("soft delete: mutation client returned nil for %T", m)
+	}
+	v, ok := value.(ent.Value)
+	if !ok {
+		return nil, fmt.Errorf("soft delete: unexpected value type %T for %T", value, m)
+	}
+	return v, nil
+}
diff --git a/backend/ent/schema/promo_code.go b/backend/ent/schema/promo_code.go
index c3bb824b..3dd08c0e 100644
--- a/backend/ent/schema/promo_code.go
+++ b/backend/ent/schema/promo_code.go
@@ -3,7 +3,7 @@ package schema
 import (
 	"time"
 
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -49,7 +49,7 @@ func (PromoCode) Fields() []ent.Field {
 			Comment("Number of times used"),
 		field.String("status").
 			MaxLen(20).
-			Default(service.PromoCodeStatusActive).
+			Default(domain.PromoCodeStatusActive).
 			Comment("Status: active, disabled"),
 		field.Time("expires_at").
 			Optional().
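mutateWithClient replaces the direct mx.Client().Mutate(ctx, m) call with reflection, presumably because the dropped dbent import tied the schema mixin to the generated package and risked an import cycle; the hook now discovers Client() and Mutate at runtime and checks both signatures before calling them. A self-contained sketch of the same lookup pattern, with illustrative types standing in for the generated mutation and client:

	package main

	import (
		"fmt"
		"reflect"
	)

	type fakeClient struct{}

	func (fakeClient) Mutate(op string) (string, error) { return "applied:" + op, nil }

	type fakeMutation struct{}

	// Client is found via MethodByName at runtime, as in mutateWithClient.
	func (fakeMutation) Client() fakeClient { return fakeClient{} }

	func main() {
		var m any = fakeMutation{}
		clientVal := reflect.ValueOf(m).MethodByName("Client").Call(nil)[0]
		out := clientVal.MethodByName("Mutate").
			Call([]reflect.Value{reflect.ValueOf("soft-delete")})
		fmt.Println(out[0].Interface(), out[1].IsNil()) // applied:soft-delete true
	}
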
diff --git a/backend/ent/schema/redeem_code.go b/backend/ent/schema/redeem_code.go
index b4664e06..6fb86148 100644
--- a/backend/ent/schema/redeem_code.go
+++ b/backend/ent/schema/redeem_code.go
@@ -3,7 +3,7 @@ package schema
 import (
 	"time"
 
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -41,13 +41,13 @@ func (RedeemCode) Fields() []ent.Field {
 			Unique(),
 		field.String("type").
 			MaxLen(20).
-			Default(service.RedeemTypeBalance),
+			Default(domain.RedeemTypeBalance),
 		field.Float("value").
 			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
 			Default(0),
 		field.String("status").
 			MaxLen(20).
-			Default(service.StatusUnused),
+			Default(domain.StatusUnused),
 		field.Int64("used_by").
 			Optional().
 			Nillable(),
diff --git a/backend/ent/schema/usage_cleanup_task.go b/backend/ent/schema/usage_cleanup_task.go
new file mode 100644
index 00000000..753e6410
--- /dev/null
+++ b/backend/ent/schema/usage_cleanup_task.go
@@ -0,0 +1,75 @@
+package schema
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// UsageCleanupTask defines the schema for usage-log cleanup tasks.
+type UsageCleanupTask struct {
+	ent.Schema
+}
+
+func (UsageCleanupTask) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "usage_cleanup_tasks"},
+	}
+}
+
+func (UsageCleanupTask) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		mixins.TimeMixin{},
+	}
+}
+
+func (UsageCleanupTask) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("status").
+			MaxLen(20).
+			Validate(validateUsageCleanupStatus),
+		field.JSON("filters", json.RawMessage{}),
+		field.Int64("created_by"),
+		field.Int64("deleted_rows").
+			Default(0),
+		field.String("error_message").
+			Optional().
+			Nillable(),
+		field.Int64("canceled_by").
+			Optional().
+			Nillable(),
+		field.Time("canceled_at").
+			Optional().
+			Nillable(),
+		field.Time("started_at").
+			Optional().
+			Nillable(),
+		field.Time("finished_at").
+			Optional().
+			Nillable(),
+	}
+}
+
+func (UsageCleanupTask) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("status", "created_at"),
+		index.Fields("created_at"),
+		index.Fields("canceled_at"),
+	}
+}
+
+func validateUsageCleanupStatus(status string) error {
+	switch status {
+	case "pending", "running", "succeeded", "failed", "canceled":
+		return nil
+	default:
+		return fmt.Errorf("invalid usage cleanup status: %s", status)
+	}
+}
diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go
index 79dc2286..d443ef45 100644
--- a/backend/ent/schema/user.go
+++ b/backend/ent/schema/user.go
@@ -2,7 +2,7 @@ package schema
 
 import (
 	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -43,7 +43,7 @@ func (User) Fields() []ent.Field {
 			NotEmpty(),
 		field.String("role").
 			MaxLen(20).
-			Default(service.RoleUser),
+			Default(domain.RoleUser),
 		field.Float("balance").
 			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
 			Default(0),
@@ -51,7 +51,7 @@
 			Default(5),
 		field.String("status").
 			MaxLen(20).
-			Default(service.StatusActive),
+			Default(domain.StatusActive),
 
 		// Optional profile fields (added later; default '' in DB migration)
 		field.String("username").
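The usage_cleanup_task.go schema above is intentionally thin: the status lifecycle (pending, running, succeeded, failed, canceled) is enforced by validateUsageCleanupStatus at save time, and filters stays an opaque json.RawMessage for the worker to interpret. A minimal sketch of enqueueing a task through the generated client, assuming a constructed *ent.Client (the filter payload and helper name are illustrative):

	// Assumes: import ("context"; "encoding/json"; "github.com/Wei-Shaw/sub2api/ent")
	func enqueueCleanup(ctx context.Context, client *ent.Client, adminID int64) (*ent.UsageCleanupTask, error) {
		// The generated check() requires status, filters and created_by; a
		// status outside the validated set (e.g. "archived") is rejected
		// before the INSERT is issued.
		return client.UsageCleanupTask.
			Create().
			SetStatus("pending").
			SetFilters(json.RawMessage(`{"before": "2025-01-01T00:00:00Z"}`)).
			SetCreatedBy(adminID).
			Save(ctx) // deleted_rows defaults to 0 via DefaultDeletedRows
	}
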
@@ -61,6 +61,17 @@ func (User) Fields() []ent.Field {
 		field.String("notes").
 			SchemaType(map[string]string{dialect.Postgres: "text"}).
 			Default(""),
+
+		// TOTP two-factor authentication fields
+		field.String("totp_secret_encrypted").
+			SchemaType(map[string]string{dialect.Postgres: "text"}).
+			Optional().
+			Nillable(),
+		field.Bool("totp_enabled").
+			Default(false),
+		field.Time("totp_enabled_at").
+			Optional().
+			Nillable(),
 	}
 }
 
@@ -70,6 +81,7 @@ func (User) Edges() []ent.Edge {
 		edge.To("redeem_codes", RedeemCode.Type),
 		edge.To("subscriptions", UserSubscription.Type),
 		edge.To("assigned_subscriptions", UserSubscription.Type),
+		edge.To("announcement_reads", AnnouncementRead.Type),
 		edge.To("allowed_groups", Group.Type).
 			Through("user_allowed_groups", UserAllowedGroup.Type),
 		edge.To("usage_logs", UsageLog.Type),
diff --git a/backend/ent/schema/user_subscription.go b/backend/ent/schema/user_subscription.go
index b21f4083..fa13612b 100644
--- a/backend/ent/schema/user_subscription.go
+++ b/backend/ent/schema/user_subscription.go
@@ -4,7 +4,7 @@ import (
 	"time"
 
 	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
-	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/domain"
 
 	"entgo.io/ent"
 	"entgo.io/ent/dialect"
@@ -44,7 +44,7 @@ func (UserSubscription) Fields() []ent.Field {
 			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
 		field.String("status").
 			MaxLen(20).
-			Default(service.SubscriptionStatusActive),
+			Default(domain.SubscriptionStatusActive),
 
 		field.Time("daily_window_start").
 			Optional().
diff --git a/backend/ent/tx.go b/backend/ent/tx.go
index 56df121a..702bdf90 100644
--- a/backend/ent/tx.go
+++ b/backend/ent/tx.go
@@ -20,6 +20,10 @@ type Tx struct {
 	Account *AccountClient
 	// AccountGroup is the client for interacting with the AccountGroup builders.
 	AccountGroup *AccountGroupClient
+	// Announcement is the client for interacting with the Announcement builders.
+	Announcement *AnnouncementClient
+	// AnnouncementRead is the client for interacting with the AnnouncementRead builders.
+	AnnouncementRead *AnnouncementReadClient
 	// Group is the client for interacting with the Group builders.
 	Group *GroupClient
 	// PromoCode is the client for interacting with the PromoCode builders.
@@ -32,6 +36,8 @@ type Tx struct {
 	RedeemCode *RedeemCodeClient
 	// Setting is the client for interacting with the Setting builders.
 	Setting *SettingClient
+	// UsageCleanupTask is the client for interacting with the UsageCleanupTask builders.
+	UsageCleanupTask *UsageCleanupTaskClient
 	// UsageLog is the client for interacting with the UsageLog builders.
 	UsageLog *UsageLogClient
 	// User is the client for interacting with the User builders.
@@ -178,12 +184,15 @@ func (tx *Tx) init() { tx.APIKey = NewAPIKeyClient(tx.config) tx.Account = NewAccountClient(tx.config) tx.AccountGroup = NewAccountGroupClient(tx.config) + tx.Announcement = NewAnnouncementClient(tx.config) + tx.AnnouncementRead = NewAnnouncementReadClient(tx.config) tx.Group = NewGroupClient(tx.config) tx.PromoCode = NewPromoCodeClient(tx.config) tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) tx.Proxy = NewProxyClient(tx.config) tx.RedeemCode = NewRedeemCodeClient(tx.config) tx.Setting = NewSettingClient(tx.config) + tx.UsageCleanupTask = NewUsageCleanupTaskClient(tx.config) tx.UsageLog = NewUsageLogClient(tx.config) tx.User = NewUserClient(tx.config) tx.UserAllowedGroup = NewUserAllowedGroupClient(tx.config) diff --git a/backend/ent/usagecleanuptask.go b/backend/ent/usagecleanuptask.go new file mode 100644 index 00000000..e3a17b5a --- /dev/null +++ b/backend/ent/usagecleanuptask.go @@ -0,0 +1,236 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTask is the model entity for the UsageCleanupTask schema. +type UsageCleanupTask struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Filters holds the value of the "filters" field. + Filters json.RawMessage `json:"filters,omitempty"` + // CreatedBy holds the value of the "created_by" field. + CreatedBy int64 `json:"created_by,omitempty"` + // DeletedRows holds the value of the "deleted_rows" field. + DeletedRows int64 `json:"deleted_rows,omitempty"` + // ErrorMessage holds the value of the "error_message" field. + ErrorMessage *string `json:"error_message,omitempty"` + // CanceledBy holds the value of the "canceled_by" field. + CanceledBy *int64 `json:"canceled_by,omitempty"` + // CanceledAt holds the value of the "canceled_at" field. + CanceledAt *time.Time `json:"canceled_at,omitempty"` + // StartedAt holds the value of the "started_at" field. + StartedAt *time.Time `json:"started_at,omitempty"` + // FinishedAt holds the value of the "finished_at" field. + FinishedAt *time.Time `json:"finished_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*UsageCleanupTask) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case usagecleanuptask.FieldFilters: + values[i] = new([]byte) + case usagecleanuptask.FieldID, usagecleanuptask.FieldCreatedBy, usagecleanuptask.FieldDeletedRows, usagecleanuptask.FieldCanceledBy: + values[i] = new(sql.NullInt64) + case usagecleanuptask.FieldStatus, usagecleanuptask.FieldErrorMessage: + values[i] = new(sql.NullString) + case usagecleanuptask.FieldCreatedAt, usagecleanuptask.FieldUpdatedAt, usagecleanuptask.FieldCanceledAt, usagecleanuptask.FieldStartedAt, usagecleanuptask.FieldFinishedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UsageCleanupTask fields. +func (_m *UsageCleanupTask) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case usagecleanuptask.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case usagecleanuptask.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case usagecleanuptask.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case usagecleanuptask.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case usagecleanuptask.FieldFilters: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field filters", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Filters); err != nil { + return fmt.Errorf("unmarshal field filters: %w", err) + } + } + case usagecleanuptask.FieldCreatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + _m.CreatedBy = value.Int64 + } + case usagecleanuptask.FieldDeletedRows: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field deleted_rows", values[i]) + } else if value.Valid { + _m.DeletedRows = value.Int64 + } + case usagecleanuptask.FieldErrorMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_message", values[i]) + } else if value.Valid { + _m.ErrorMessage = new(string) + *_m.ErrorMessage = value.String + } + case usagecleanuptask.FieldCanceledBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field canceled_by", values[i]) + } else if value.Valid { + _m.CanceledBy = new(int64) + *_m.CanceledBy = value.Int64 + } + case usagecleanuptask.FieldCanceledAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field canceled_at", values[i]) + } else if value.Valid { + _m.CanceledAt = new(time.Time) + *_m.CanceledAt 
= value.Time + } + case usagecleanuptask.FieldStartedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field started_at", values[i]) + } else if value.Valid { + _m.StartedAt = new(time.Time) + *_m.StartedAt = value.Time + } + case usagecleanuptask.FieldFinishedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field finished_at", values[i]) + } else if value.Valid { + _m.FinishedAt = new(time.Time) + *_m.FinishedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UsageCleanupTask. +// This includes values selected through modifiers, order, etc. +func (_m *UsageCleanupTask) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this UsageCleanupTask. +// Note that you need to call UsageCleanupTask.Unwrap() before calling this method if this UsageCleanupTask +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UsageCleanupTask) Update() *UsageCleanupTaskUpdateOne { + return NewUsageCleanupTaskClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UsageCleanupTask entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UsageCleanupTask) Unwrap() *UsageCleanupTask { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UsageCleanupTask is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UsageCleanupTask) String() string { + var builder strings.Builder + builder.WriteString("UsageCleanupTask(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("filters=") + builder.WriteString(fmt.Sprintf("%v", _m.Filters)) + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy)) + builder.WriteString(", ") + builder.WriteString("deleted_rows=") + builder.WriteString(fmt.Sprintf("%v", _m.DeletedRows)) + builder.WriteString(", ") + if v := _m.ErrorMessage; v != nil { + builder.WriteString("error_message=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.CanceledBy; v != nil { + builder.WriteString("canceled_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.CanceledAt; v != nil { + builder.WriteString("canceled_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.StartedAt; v != nil { + builder.WriteString("started_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.FinishedAt; v != nil { + builder.WriteString("finished_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteByte(')') + return builder.String() +} + +// UsageCleanupTasks is a parsable slice of UsageCleanupTask. 
+type UsageCleanupTasks []*UsageCleanupTask diff --git a/backend/ent/usagecleanuptask/usagecleanuptask.go b/backend/ent/usagecleanuptask/usagecleanuptask.go new file mode 100644 index 00000000..a8ddd9a0 --- /dev/null +++ b/backend/ent/usagecleanuptask/usagecleanuptask.go @@ -0,0 +1,137 @@ +// Code generated by ent, DO NOT EDIT. + +package usagecleanuptask + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the usagecleanuptask type in the database. + Label = "usage_cleanup_task" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldFilters holds the string denoting the filters field in the database. + FieldFilters = "filters" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // FieldDeletedRows holds the string denoting the deleted_rows field in the database. + FieldDeletedRows = "deleted_rows" + // FieldErrorMessage holds the string denoting the error_message field in the database. + FieldErrorMessage = "error_message" + // FieldCanceledBy holds the string denoting the canceled_by field in the database. + FieldCanceledBy = "canceled_by" + // FieldCanceledAt holds the string denoting the canceled_at field in the database. + FieldCanceledAt = "canceled_at" + // FieldStartedAt holds the string denoting the started_at field in the database. + FieldStartedAt = "started_at" + // FieldFinishedAt holds the string denoting the finished_at field in the database. + FieldFinishedAt = "finished_at" + // Table holds the table name of the usagecleanuptask in the database. + Table = "usage_cleanup_tasks" +) + +// Columns holds all SQL columns for usagecleanuptask fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldStatus, + FieldFilters, + FieldCreatedBy, + FieldDeletedRows, + FieldErrorMessage, + FieldCanceledBy, + FieldCanceledAt, + FieldStartedAt, + FieldFinishedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultDeletedRows holds the default value on creation for the "deleted_rows" field. + DefaultDeletedRows int64 +) + +// OrderOption defines the ordering options for the UsageCleanupTask queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByDeletedRows orders the results by the deleted_rows field. +func ByDeletedRows(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedRows, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByCanceledBy orders the results by the canceled_by field. +func ByCanceledBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCanceledBy, opts...).ToFunc() +} + +// ByCanceledAt orders the results by the canceled_at field. +func ByCanceledAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCanceledAt, opts...).ToFunc() +} + +// ByStartedAt orders the results by the started_at field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByFinishedAt orders the results by the finished_at field. +func ByFinishedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFinishedAt, opts...).ToFunc() +} diff --git a/backend/ent/usagecleanuptask/where.go b/backend/ent/usagecleanuptask/where.go new file mode 100644 index 00000000..99e790ca --- /dev/null +++ b/backend/ent/usagecleanuptask/where.go @@ -0,0 +1,620 @@ +// Code generated by ent, DO NOT EDIT. + +package usagecleanuptask + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedBy, v)) +} + +// DeletedRows applies equality check predicate on the "deleted_rows" field. It's identical to DeletedRowsEQ. +func DeletedRows(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldDeletedRows, v)) +} + +// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. +func ErrorMessage(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldErrorMessage, v)) +} + +// CanceledBy applies equality check predicate on the "canceled_by" field. It's identical to CanceledByEQ. +func CanceledBy(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledBy, v)) +} + +// CanceledAt applies equality check predicate on the "canceled_at" field. It's identical to CanceledAtEQ. +func CanceledAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledAt, v)) +} + +// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. +func StartedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStartedAt, v)) +} + +// FinishedAt applies equality check predicate on the "finished_at" field. It's identical to FinishedAtEQ. +func FinishedAt(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldFinishedAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. 
+func StatusGT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContainsFold(FieldStatus, v)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCreatedBy, v)) +} + +// DeletedRowsEQ applies the EQ predicate on the "deleted_rows" field. 
+func DeletedRowsEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldDeletedRows, v)) +} + +// DeletedRowsNEQ applies the NEQ predicate on the "deleted_rows" field. +func DeletedRowsNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldDeletedRows, v)) +} + +// DeletedRowsIn applies the In predicate on the "deleted_rows" field. +func DeletedRowsIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldDeletedRows, vs...)) +} + +// DeletedRowsNotIn applies the NotIn predicate on the "deleted_rows" field. +func DeletedRowsNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldDeletedRows, vs...)) +} + +// DeletedRowsGT applies the GT predicate on the "deleted_rows" field. +func DeletedRowsGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldDeletedRows, v)) +} + +// DeletedRowsGTE applies the GTE predicate on the "deleted_rows" field. +func DeletedRowsGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldDeletedRows, v)) +} + +// DeletedRowsLT applies the LT predicate on the "deleted_rows" field. +func DeletedRowsLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldDeletedRows, v)) +} + +// DeletedRowsLTE applies the LTE predicate on the "deleted_rows" field. +func DeletedRowsLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldDeletedRows, v)) +} + +// ErrorMessageEQ applies the EQ predicate on the "error_message" field. +func ErrorMessageEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. +func ErrorMessageNEQ(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldErrorMessage, v)) +} + +// ErrorMessageIn applies the In predicate on the "error_message" field. +func ErrorMessageIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. +func ErrorMessageNotIn(vs ...string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageGT applies the GT predicate on the "error_message" field. +func ErrorMessageGT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldErrorMessage, v)) +} + +// ErrorMessageGTE applies the GTE predicate on the "error_message" field. +func ErrorMessageGTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldErrorMessage, v)) +} + +// ErrorMessageLT applies the LT predicate on the "error_message" field. +func ErrorMessageLT(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldErrorMessage, v)) +} + +// ErrorMessageLTE applies the LTE predicate on the "error_message" field. +func ErrorMessageLTE(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldErrorMessage, v)) +} + +// ErrorMessageContains applies the Contains predicate on the "error_message" field. 
+func ErrorMessageContains(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContains(FieldErrorMessage, v)) +} + +// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. +func ErrorMessageHasPrefix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasPrefix(FieldErrorMessage, v)) +} + +// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. +func ErrorMessageHasSuffix(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldHasSuffix(FieldErrorMessage, v)) +} + +// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. +func ErrorMessageIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldErrorMessage)) +} + +// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. +func ErrorMessageNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldErrorMessage)) +} + +// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. +func ErrorMessageEqualFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEqualFold(FieldErrorMessage, v)) +} + +// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. +func ErrorMessageContainsFold(v string) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldContainsFold(FieldErrorMessage, v)) +} + +// CanceledByEQ applies the EQ predicate on the "canceled_by" field. +func CanceledByEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledBy, v)) +} + +// CanceledByNEQ applies the NEQ predicate on the "canceled_by" field. +func CanceledByNEQ(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCanceledBy, v)) +} + +// CanceledByIn applies the In predicate on the "canceled_by" field. +func CanceledByIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCanceledBy, vs...)) +} + +// CanceledByNotIn applies the NotIn predicate on the "canceled_by" field. +func CanceledByNotIn(vs ...int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCanceledBy, vs...)) +} + +// CanceledByGT applies the GT predicate on the "canceled_by" field. +func CanceledByGT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCanceledBy, v)) +} + +// CanceledByGTE applies the GTE predicate on the "canceled_by" field. +func CanceledByGTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCanceledBy, v)) +} + +// CanceledByLT applies the LT predicate on the "canceled_by" field. +func CanceledByLT(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCanceledBy, v)) +} + +// CanceledByLTE applies the LTE predicate on the "canceled_by" field. +func CanceledByLTE(v int64) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCanceledBy, v)) +} + +// CanceledByIsNil applies the IsNil predicate on the "canceled_by" field. +func CanceledByIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldCanceledBy)) +} + +// CanceledByNotNil applies the NotNil predicate on the "canceled_by" field. 
+func CanceledByNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldCanceledBy)) +} + +// CanceledAtEQ applies the EQ predicate on the "canceled_at" field. +func CanceledAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldCanceledAt, v)) +} + +// CanceledAtNEQ applies the NEQ predicate on the "canceled_at" field. +func CanceledAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldCanceledAt, v)) +} + +// CanceledAtIn applies the In predicate on the "canceled_at" field. +func CanceledAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldCanceledAt, vs...)) +} + +// CanceledAtNotIn applies the NotIn predicate on the "canceled_at" field. +func CanceledAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldCanceledAt, vs...)) +} + +// CanceledAtGT applies the GT predicate on the "canceled_at" field. +func CanceledAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldCanceledAt, v)) +} + +// CanceledAtGTE applies the GTE predicate on the "canceled_at" field. +func CanceledAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldCanceledAt, v)) +} + +// CanceledAtLT applies the LT predicate on the "canceled_at" field. +func CanceledAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldCanceledAt, v)) +} + +// CanceledAtLTE applies the LTE predicate on the "canceled_at" field. +func CanceledAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldCanceledAt, v)) +} + +// CanceledAtIsNil applies the IsNil predicate on the "canceled_at" field. +func CanceledAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldCanceledAt)) +} + +// CanceledAtNotNil applies the NotNil predicate on the "canceled_at" field. +func CanceledAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldCanceledAt)) +} + +// StartedAtEQ applies the EQ predicate on the "started_at" field. +func StartedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldStartedAt, v)) +} + +// StartedAtNEQ applies the NEQ predicate on the "started_at" field. +func StartedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldStartedAt, v)) +} + +// StartedAtIn applies the In predicate on the "started_at" field. +func StartedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldStartedAt, vs...)) +} + +// StartedAtNotIn applies the NotIn predicate on the "started_at" field. +func StartedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldStartedAt, vs...)) +} + +// StartedAtGT applies the GT predicate on the "started_at" field. +func StartedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldStartedAt, v)) +} + +// StartedAtGTE applies the GTE predicate on the "started_at" field. +func StartedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldStartedAt, v)) +} + +// StartedAtLT applies the LT predicate on the "started_at" field. 
+func StartedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldStartedAt, v)) +} + +// StartedAtLTE applies the LTE predicate on the "started_at" field. +func StartedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldStartedAt, v)) +} + +// StartedAtIsNil applies the IsNil predicate on the "started_at" field. +func StartedAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldStartedAt)) +} + +// StartedAtNotNil applies the NotNil predicate on the "started_at" field. +func StartedAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldStartedAt)) +} + +// FinishedAtEQ applies the EQ predicate on the "finished_at" field. +func FinishedAtEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldEQ(FieldFinishedAt, v)) +} + +// FinishedAtNEQ applies the NEQ predicate on the "finished_at" field. +func FinishedAtNEQ(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNEQ(FieldFinishedAt, v)) +} + +// FinishedAtIn applies the In predicate on the "finished_at" field. +func FinishedAtIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIn(FieldFinishedAt, vs...)) +} + +// FinishedAtNotIn applies the NotIn predicate on the "finished_at" field. +func FinishedAtNotIn(vs ...time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotIn(FieldFinishedAt, vs...)) +} + +// FinishedAtGT applies the GT predicate on the "finished_at" field. +func FinishedAtGT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGT(FieldFinishedAt, v)) +} + +// FinishedAtGTE applies the GTE predicate on the "finished_at" field. +func FinishedAtGTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldGTE(FieldFinishedAt, v)) +} + +// FinishedAtLT applies the LT predicate on the "finished_at" field. +func FinishedAtLT(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLT(FieldFinishedAt, v)) +} + +// FinishedAtLTE applies the LTE predicate on the "finished_at" field. +func FinishedAtLTE(v time.Time) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldLTE(FieldFinishedAt, v)) +} + +// FinishedAtIsNil applies the IsNil predicate on the "finished_at" field. +func FinishedAtIsNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldIsNull(FieldFinishedAt)) +} + +// FinishedAtNotNil applies the NotNil predicate on the "finished_at" field. +func FinishedAtNotNil() predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.FieldNotNull(FieldFinishedAt)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.UsageCleanupTask) predicate.UsageCleanupTask { + return predicate.UsageCleanupTask(sql.NotPredicates(p)) +} diff --git a/backend/ent/usagecleanuptask_create.go b/backend/ent/usagecleanuptask_create.go new file mode 100644 index 00000000..0b1dcff5 --- /dev/null +++ b/backend/ent/usagecleanuptask_create.go @@ -0,0 +1,1190 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskCreate is the builder for creating a UsageCleanupTask entity. +type UsageCleanupTaskCreate struct { + config + mutation *UsageCleanupTaskMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UsageCleanupTaskCreate) SetCreatedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCreatedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UsageCleanupTaskCreate) SetUpdatedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableUpdatedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *UsageCleanupTaskCreate) SetStatus(v string) *UsageCleanupTaskCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetFilters sets the "filters" field. +func (_c *UsageCleanupTaskCreate) SetFilters(v json.RawMessage) *UsageCleanupTaskCreate { + _c.mutation.SetFilters(v) + return _c +} + +// SetCreatedBy sets the "created_by" field. +func (_c *UsageCleanupTaskCreate) SetCreatedBy(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetCreatedBy(v) + return _c +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_c *UsageCleanupTaskCreate) SetDeletedRows(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetDeletedRows(v) + return _c +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableDeletedRows(v *int64) *UsageCleanupTaskCreate { + if v != nil { + _c.SetDeletedRows(*v) + } + return _c +} + +// SetErrorMessage sets the "error_message" field. +func (_c *UsageCleanupTaskCreate) SetErrorMessage(v string) *UsageCleanupTaskCreate { + _c.mutation.SetErrorMessage(v) + return _c +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableErrorMessage(v *string) *UsageCleanupTaskCreate { + if v != nil { + _c.SetErrorMessage(*v) + } + return _c +} + +// SetCanceledBy sets the "canceled_by" field. +func (_c *UsageCleanupTaskCreate) SetCanceledBy(v int64) *UsageCleanupTaskCreate { + _c.mutation.SetCanceledBy(v) + return _c +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCanceledBy(v *int64) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCanceledBy(*v) + } + return _c +} + +// SetCanceledAt sets the "canceled_at" field. 
+func (_c *UsageCleanupTaskCreate) SetCanceledAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetCanceledAt(v) + return _c +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetCanceledAt(*v) + } + return _c +} + +// SetStartedAt sets the "started_at" field. +func (_c *UsageCleanupTaskCreate) SetStartedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetStartedAt(v) + return _c +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetStartedAt(*v) + } + return _c +} + +// SetFinishedAt sets the "finished_at" field. +func (_c *UsageCleanupTaskCreate) SetFinishedAt(v time.Time) *UsageCleanupTaskCreate { + _c.mutation.SetFinishedAt(v) + return _c +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_c *UsageCleanupTaskCreate) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskCreate { + if v != nil { + _c.SetFinishedAt(*v) + } + return _c +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_c *UsageCleanupTaskCreate) Mutation() *UsageCleanupTaskMutation { + return _c.mutation +} + +// Save creates the UsageCleanupTask in the database. +func (_c *UsageCleanupTaskCreate) Save(ctx context.Context) (*UsageCleanupTask, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UsageCleanupTaskCreate) SaveX(ctx context.Context) *UsageCleanupTask { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UsageCleanupTaskCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UsageCleanupTaskCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UsageCleanupTaskCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := usagecleanuptask.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.DeletedRows(); !ok { + v := usagecleanuptask.DefaultDeletedRows + _c.mutation.SetDeletedRows(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UsageCleanupTaskCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UsageCleanupTask.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UsageCleanupTask.updated_at"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "UsageCleanupTask.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + if _, ok := _c.mutation.Filters(); !ok { + return &ValidationError{Name: "filters", err: errors.New(`ent: missing required field "UsageCleanupTask.filters"`)} + } + if _, ok := _c.mutation.CreatedBy(); !ok { + return &ValidationError{Name: "created_by", err: errors.New(`ent: missing required field "UsageCleanupTask.created_by"`)} + } + if _, ok := _c.mutation.DeletedRows(); !ok { + return &ValidationError{Name: "deleted_rows", err: errors.New(`ent: missing required field "UsageCleanupTask.deleted_rows"`)} + } + return nil +} + +func (_c *UsageCleanupTaskCreate) sqlSave(ctx context.Context) (*UsageCleanupTask, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UsageCleanupTaskCreate) createSpec() (*UsageCleanupTask, *sqlgraph.CreateSpec) { + var ( + _node = &UsageCleanupTask{config: _c.config} + _spec = sqlgraph.NewCreateSpec(usagecleanuptask.Table, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + _node.Filters = value + } + if value, ok := _c.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + _node.CreatedBy = value + } + if value, ok := _c.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + _node.DeletedRows = value + } + if value, ok := _c.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + _node.ErrorMessage = &value + } + if value, ok := _c.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + _node.CanceledBy = &value + } + if value, ok := _c.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + _node.CanceledAt = &value + } + if value, ok := 
_c.mutation.StartedAt(); ok {
+		_spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value)
+		_node.StartedAt = &value
+	}
+	if value, ok := _c.mutation.FinishedAt(); ok {
+		_spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value)
+		_node.FinishedAt = &value
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UsageCleanupTask.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UsageCleanupTaskUpsert) {
+//			SetCreatedAt(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *UsageCleanupTaskCreate) OnConflict(opts ...sql.ConflictOption) *UsageCleanupTaskUpsertOne {
+	_c.conflict = opts
+	return &UsageCleanupTaskUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UsageCleanupTask.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UsageCleanupTaskCreate) OnConflictColumns(columns ...string) *UsageCleanupTaskUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UsageCleanupTaskUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// UsageCleanupTaskUpsertOne is the builder for "upsert"-ing
+	// one UsageCleanupTask node.
+	UsageCleanupTaskUpsertOne struct {
+		create *UsageCleanupTaskCreate
+	}
+
+	// UsageCleanupTaskUpsert is the "OnConflict" setter.
+	UsageCleanupTaskUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *UsageCleanupTaskUpsert) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsert {
+	u.Set(usagecleanuptask.FieldUpdatedAt, v)
+	return u
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *UsageCleanupTaskUpsert) UpdateUpdatedAt() *UsageCleanupTaskUpsert {
+	u.SetExcluded(usagecleanuptask.FieldUpdatedAt)
+	return u
+}
+
+// SetStatus sets the "status" field.
+func (u *UsageCleanupTaskUpsert) SetStatus(v string) *UsageCleanupTaskUpsert {
+	u.Set(usagecleanuptask.FieldStatus, v)
+	return u
+}
+
+// UpdateStatus sets the "status" field to the value that was provided on create.
+func (u *UsageCleanupTaskUpsert) UpdateStatus() *UsageCleanupTaskUpsert {
+	u.SetExcluded(usagecleanuptask.FieldStatus)
+	return u
+}
+
+// SetFilters sets the "filters" field.
+func (u *UsageCleanupTaskUpsert) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsert {
+	u.Set(usagecleanuptask.FieldFilters, v)
+	return u
+}
+
+// UpdateFilters sets the "filters" field to the value that was provided on create.
+func (u *UsageCleanupTaskUpsert) UpdateFilters() *UsageCleanupTaskUpsert {
+	u.SetExcluded(usagecleanuptask.FieldFilters)
+	return u
+}
+
+// SetCreatedBy sets the "created_by" field.
+func (u *UsageCleanupTaskUpsert) SetCreatedBy(v int64) *UsageCleanupTaskUpsert {
+	u.Set(usagecleanuptask.FieldCreatedBy, v)
+	return u
+}
+
+// UpdateCreatedBy sets the "created_by" field to the value that was provided on create.
+func (u *UsageCleanupTaskUpsert) UpdateCreatedBy() *UsageCleanupTaskUpsert {
+	u.SetExcluded(usagecleanuptask.FieldCreatedBy)
+	return u
+}
+
+// AddCreatedBy adds v to the "created_by" field. 
+func (u *UsageCleanupTaskUpsert) AddCreatedBy(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldCreatedBy, v) + return u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsert) SetDeletedRows(v int64) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldDeletedRows, v) + return u +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateDeletedRows() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldDeletedRows) + return u +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsert) AddDeletedRows(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldDeletedRows, v) + return u +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsert) SetErrorMessage(v string) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldErrorMessage, v) + return u +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateErrorMessage() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldErrorMessage) + return u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsert) ClearErrorMessage() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldErrorMessage) + return u +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) SetCanceledBy(v int64) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldCanceledBy, v) + return u +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateCanceledBy() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldCanceledBy) + return u +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) AddCanceledBy(v int64) *UsageCleanupTaskUpsert { + u.Add(usagecleanuptask.FieldCanceledBy, v) + return u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (u *UsageCleanupTaskUpsert) ClearCanceledBy() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldCanceledBy) + return u +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsert) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldCanceledAt, v) + return u +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateCanceledAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldCanceledAt) + return u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (u *UsageCleanupTaskUpsert) ClearCanceledAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldCanceledAt) + return u +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsert) SetStartedAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldStartedAt, v) + return u +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateStartedAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldStartedAt) + return u +} + +// ClearStartedAt clears the value of the "started_at" field. 
+func (u *UsageCleanupTaskUpsert) ClearStartedAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldStartedAt) + return u +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsert) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsert { + u.Set(usagecleanuptask.FieldFinishedAt, v) + return u +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsert) UpdateFinishedAt() *UsageCleanupTaskUpsert { + u.SetExcluded(usagecleanuptask.FieldFinishedAt) + return u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsert) ClearFinishedAt() *UsageCleanupTaskUpsert { + u.SetNull(usagecleanuptask.FieldFinishedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertOne) UpdateNewValues() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(usagecleanuptask.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertOne) Ignore() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UsageCleanupTaskUpsertOne) DoNothing() *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageCleanupTaskCreate.OnConflict +// documentation for more info. +func (u *UsageCleanupTaskUpsertOne) Update(set func(*UsageCleanupTaskUpsert)) *UsageCleanupTaskUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageCleanupTaskUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UsageCleanupTaskUpsertOne) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateUpdatedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UsageCleanupTaskUpsertOne) SetStatus(v string) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateStatus() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStatus() + }) +} + +// SetFilters sets the "filters" field. 
+func (u *UsageCleanupTaskUpsertOne) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFilters(v) + }) +} + +// UpdateFilters sets the "filters" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateFilters() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFilters() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *UsageCleanupTaskUpsertOne) SetCreatedBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *UsageCleanupTaskUpsertOne) AddCreatedBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCreatedBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCreatedBy() + }) +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertOne) SetDeletedRows(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetDeletedRows(v) + }) +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertOne) AddDeletedRows(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddDeletedRows(v) + }) +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateDeletedRows() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateDeletedRows() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsertOne) SetErrorMessage(v string) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateErrorMessage() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsertOne) ClearErrorMessage() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearErrorMessage() + }) +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsertOne) SetCanceledBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledBy(v) + }) +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsertOne) AddCanceledBy(v int64) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCanceledBy(v) + }) +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCanceledBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledBy() + }) +} + +// ClearCanceledBy clears the value of the "canceled_by" field. 
+func (u *UsageCleanupTaskUpsertOne) ClearCanceledBy() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledBy() + }) +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsertOne) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledAt(v) + }) +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateCanceledAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledAt() + }) +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearCanceledAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledAt() + }) +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsertOne) SetStartedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStartedAt(v) + }) +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateStartedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStartedAt() + }) +} + +// ClearStartedAt clears the value of the "started_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearStartedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearStartedAt() + }) +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsertOne) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFinishedAt(v) + }) +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertOne) UpdateFinishedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFinishedAt() + }) +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsertOne) ClearFinishedAt() *UsageCleanupTaskUpsertOne { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearFinishedAt() + }) +} + +// Exec executes the query. +func (u *UsageCleanupTaskUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UsageCleanupTaskCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UsageCleanupTaskUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UsageCleanupTaskCreateBulk is the builder for creating many UsageCleanupTask entities in bulk. +type UsageCleanupTaskCreateBulk struct { + config + err error + builders []*UsageCleanupTaskCreate + conflict []sql.ConflictOption +} + +// Save creates the UsageCleanupTask entities in the database. 
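+//
+// A minimal usage sketch (the client value and the builders slice are
+// assumptions, not part of this file):
+//
+//	tasks, err := client.UsageCleanupTask.CreateBulk(builders...).Save(ctx)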
+func (_c *UsageCleanupTaskCreateBulk) Save(ctx context.Context) ([]*UsageCleanupTask, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*UsageCleanupTask, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*UsageCleanupTaskMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UsageCleanupTaskCreateBulk) SaveX(ctx context.Context) []*UsageCleanupTask {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UsageCleanupTaskCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *UsageCleanupTaskCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UsageCleanupTask.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UsageCleanupTaskUpsert) {
+//			SetCreatedAt(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *UsageCleanupTaskCreateBulk) OnConflict(opts ...sql.ConflictOption) *UsageCleanupTaskUpsertBulk {
+	_c.conflict = opts
+	return &UsageCleanupTaskUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UsageCleanupTask.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UsageCleanupTaskCreateBulk) OnConflictColumns(columns ...string) *UsageCleanupTaskUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UsageCleanupTaskUpsertBulk{
+		create: _c,
+	}
+}
+
+// UsageCleanupTaskUpsertBulk is the builder for "upsert"-ing
+// a bulk of UsageCleanupTask nodes. 
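+//
+// A minimal usage sketch (client and builders are assumptions): resolve
+// conflicts on the id column and keep the newly proposed values:
+//
+//	client.UsageCleanupTask.CreateBulk(builders...).
+//		OnConflictColumns(usagecleanuptask.FieldID).
+//		UpdateNewValues().
+//		Exec(ctx)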
+type UsageCleanupTaskUpsertBulk struct { + create *UsageCleanupTaskCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertBulk) UpdateNewValues() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(usagecleanuptask.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UsageCleanupTask.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UsageCleanupTaskUpsertBulk) Ignore() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UsageCleanupTaskUpsertBulk) DoNothing() *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageCleanupTaskCreateBulk.OnConflict +// documentation for more info. +func (u *UsageCleanupTaskUpsertBulk) Update(set func(*UsageCleanupTaskUpsert)) *UsageCleanupTaskUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageCleanupTaskUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateUpdatedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UsageCleanupTaskUpsertBulk) SetStatus(v string) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateStatus() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStatus() + }) +} + +// SetFilters sets the "filters" field. +func (u *UsageCleanupTaskUpsertBulk) SetFilters(v json.RawMessage) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFilters(v) + }) +} + +// UpdateFilters sets the "filters" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateFilters() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFilters() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *UsageCleanupTaskUpsertBulk) SetCreatedBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. 
+func (u *UsageCleanupTaskUpsertBulk) AddCreatedBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCreatedBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCreatedBy() + }) +} + +// SetDeletedRows sets the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertBulk) SetDeletedRows(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetDeletedRows(v) + }) +} + +// AddDeletedRows adds v to the "deleted_rows" field. +func (u *UsageCleanupTaskUpsertBulk) AddDeletedRows(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddDeletedRows(v) + }) +} + +// UpdateDeletedRows sets the "deleted_rows" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateDeletedRows() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateDeletedRows() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *UsageCleanupTaskUpsertBulk) SetErrorMessage(v string) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateErrorMessage() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *UsageCleanupTaskUpsertBulk) ClearErrorMessage() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearErrorMessage() + }) +} + +// SetCanceledBy sets the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) SetCanceledBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledBy(v) + }) +} + +// AddCanceledBy adds v to the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) AddCanceledBy(v int64) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.AddCanceledBy(v) + }) +} + +// UpdateCanceledBy sets the "canceled_by" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCanceledBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledBy() + }) +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (u *UsageCleanupTaskUpsertBulk) ClearCanceledBy() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledBy() + }) +} + +// SetCanceledAt sets the "canceled_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetCanceledAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetCanceledAt(v) + }) +} + +// UpdateCanceledAt sets the "canceled_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateCanceledAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateCanceledAt() + }) +} + +// ClearCanceledAt clears the value of the "canceled_at" field. 
+func (u *UsageCleanupTaskUpsertBulk) ClearCanceledAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearCanceledAt() + }) +} + +// SetStartedAt sets the "started_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetStartedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetStartedAt(v) + }) +} + +// UpdateStartedAt sets the "started_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateStartedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateStartedAt() + }) +} + +// ClearStartedAt clears the value of the "started_at" field. +func (u *UsageCleanupTaskUpsertBulk) ClearStartedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearStartedAt() + }) +} + +// SetFinishedAt sets the "finished_at" field. +func (u *UsageCleanupTaskUpsertBulk) SetFinishedAt(v time.Time) *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.SetFinishedAt(v) + }) +} + +// UpdateFinishedAt sets the "finished_at" field to the value that was provided on create. +func (u *UsageCleanupTaskUpsertBulk) UpdateFinishedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.UpdateFinishedAt() + }) +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (u *UsageCleanupTaskUpsertBulk) ClearFinishedAt() *UsageCleanupTaskUpsertBulk { + return u.Update(func(s *UsageCleanupTaskUpsert) { + s.ClearFinishedAt() + }) +} + +// Exec executes the query. +func (u *UsageCleanupTaskUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UsageCleanupTaskCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UsageCleanupTaskCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UsageCleanupTaskUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usagecleanuptask_delete.go b/backend/ent/usagecleanuptask_delete.go new file mode 100644 index 00000000..158555f7 --- /dev/null +++ b/backend/ent/usagecleanuptask_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskDelete is the builder for deleting a UsageCleanupTask entity. +type UsageCleanupTaskDelete struct { + config + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// Where appends a list predicates to the UsageCleanupTaskDelete builder. +func (_d *UsageCleanupTaskDelete) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UsageCleanupTaskDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
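+//
+// A minimal sketch (client and ctx assumed), deleting every finished task
+// and panicking on failure:
+//
+//	n := client.UsageCleanupTask.Delete().
+//		Where(usagecleanuptask.FinishedAtNotNil()).
+//		ExecX(ctx)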
+func (_d *UsageCleanupTaskDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UsageCleanupTaskDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(usagecleanuptask.Table, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UsageCleanupTaskDeleteOne is the builder for deleting a single UsageCleanupTask entity. +type UsageCleanupTaskDeleteOne struct { + _d *UsageCleanupTaskDelete +} + +// Where appends a list predicates to the UsageCleanupTaskDelete builder. +func (_d *UsageCleanupTaskDeleteOne) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UsageCleanupTaskDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{usagecleanuptask.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UsageCleanupTaskDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usagecleanuptask_query.go b/backend/ent/usagecleanuptask_query.go new file mode 100644 index 00000000..9d8d5410 --- /dev/null +++ b/backend/ent/usagecleanuptask_query.go @@ -0,0 +1,564 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskQuery is the builder for querying UsageCleanupTask entities. +type UsageCleanupTaskQuery struct { + config + ctx *QueryContext + order []usagecleanuptask.OrderOption + inters []Interceptor + predicates []predicate.UsageCleanupTask + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UsageCleanupTaskQuery builder. +func (_q *UsageCleanupTaskQuery) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UsageCleanupTaskQuery) Limit(limit int) *UsageCleanupTaskQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UsageCleanupTaskQuery) Offset(offset int) *UsageCleanupTaskQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UsageCleanupTaskQuery) Unique(unique bool) *UsageCleanupTaskQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. 
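+//
+// A hedged sketch, assuming ent also generated per-field order options
+// (such as ByCreatedAt) for this package:
+//
+//	client.UsageCleanupTask.Query().
+//		Order(usagecleanuptask.ByCreatedAt()).
+//		All(ctx)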
+func (_q *UsageCleanupTaskQuery) Order(o ...usagecleanuptask.OrderOption) *UsageCleanupTaskQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first UsageCleanupTask entity from the query. +// Returns a *NotFoundError when no UsageCleanupTask was found. +func (_q *UsageCleanupTaskQuery) First(ctx context.Context) (*UsageCleanupTask, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{usagecleanuptask.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) FirstX(ctx context.Context) *UsageCleanupTask { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UsageCleanupTask ID from the query. +// Returns a *NotFoundError when no UsageCleanupTask ID was found. +func (_q *UsageCleanupTaskQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{usagecleanuptask.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UsageCleanupTask entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UsageCleanupTask entity is found. +// Returns a *NotFoundError when no UsageCleanupTask entities are found. +func (_q *UsageCleanupTaskQuery) Only(ctx context.Context) (*UsageCleanupTask, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{usagecleanuptask.Label} + default: + return nil, &NotSingularError{usagecleanuptask.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) OnlyX(ctx context.Context) *UsageCleanupTask { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UsageCleanupTask ID in the query. +// Returns a *NotSingularError when more than one UsageCleanupTask ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UsageCleanupTaskQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{usagecleanuptask.Label} + default: + err = &NotSingularError{usagecleanuptask.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UsageCleanupTasks. 
+func (_q *UsageCleanupTaskQuery) All(ctx context.Context) ([]*UsageCleanupTask, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UsageCleanupTask, *UsageCleanupTaskQuery]() + return withInterceptors[[]*UsageCleanupTask](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) AllX(ctx context.Context) []*UsageCleanupTask { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UsageCleanupTask IDs. +func (_q *UsageCleanupTaskQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(usagecleanuptask.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UsageCleanupTaskQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UsageCleanupTaskQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UsageCleanupTaskQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UsageCleanupTaskQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UsageCleanupTaskQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UsageCleanupTaskQuery) Clone() *UsageCleanupTaskQuery { + if _q == nil { + return nil + } + return &UsageCleanupTaskQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]usagecleanuptask.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UsageCleanupTask{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UsageCleanupTask.Query(). +// GroupBy(usagecleanuptask.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UsageCleanupTaskQuery) GroupBy(field string, fields ...string) *UsageCleanupTaskGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &UsageCleanupTaskGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = usagecleanuptask.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UsageCleanupTask.Query(). +// Select(usagecleanuptask.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UsageCleanupTaskQuery) Select(fields ...string) *UsageCleanupTaskSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UsageCleanupTaskSelect{UsageCleanupTaskQuery: _q} + sbuild.label = usagecleanuptask.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UsageCleanupTaskSelect configured with the given aggregations. +func (_q *UsageCleanupTaskQuery) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UsageCleanupTaskQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !usagecleanuptask.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UsageCleanupTaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UsageCleanupTask, error) { + var ( + nodes = []*UsageCleanupTask{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UsageCleanupTask).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UsageCleanupTask{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *UsageCleanupTaskQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UsageCleanupTaskQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usagecleanuptask.FieldID) + for i := range fields { + if fields[i] != usagecleanuptask.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = 
func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UsageCleanupTaskQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(usagecleanuptask.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = usagecleanuptask.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UsageCleanupTaskQuery) ForUpdate(opts ...sql.LockOption) *UsageCleanupTaskQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UsageCleanupTaskQuery) ForShare(opts ...sql.LockOption) *UsageCleanupTaskQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UsageCleanupTaskGroupBy is the group-by builder for UsageCleanupTask entities. +type UsageCleanupTaskGroupBy struct { + selector + build *UsageCleanupTaskQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UsageCleanupTaskGroupBy) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *UsageCleanupTaskGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UsageCleanupTaskQuery, *UsageCleanupTaskGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UsageCleanupTaskGroupBy) sqlScan(ctx context.Context, root *UsageCleanupTaskQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UsageCleanupTaskSelect is the builder for selecting fields of UsageCleanupTask entities. +type UsageCleanupTaskSelect struct { + *UsageCleanupTaskQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UsageCleanupTaskSelect) Aggregate(fns ...AggregateFunc) *UsageCleanupTaskSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UsageCleanupTaskSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UsageCleanupTaskQuery, *UsageCleanupTaskSelect](ctx, _s.UsageCleanupTaskQuery, _s, _s.inters, v) +} + +func (_s *UsageCleanupTaskSelect) sqlScan(ctx context.Context, root *UsageCleanupTaskQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/usagecleanuptask_update.go b/backend/ent/usagecleanuptask_update.go new file mode 100644 index 00000000..604202c6 --- /dev/null +++ b/backend/ent/usagecleanuptask_update.go @@ -0,0 +1,702 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" +) + +// UsageCleanupTaskUpdate is the builder for updating UsageCleanupTask entities. +type UsageCleanupTaskUpdate struct { + config + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// Where appends a list predicates to the UsageCleanupTaskUpdate builder. 
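+//
+// A minimal sketch (StatusEQ is assumed to be generated alongside the other
+// predicates; client and ctx are assumptions too), marking matching tasks
+// as finished:
+//
+//	client.UsageCleanupTask.Update().
+//		Where(usagecleanuptask.StatusEQ("running")).
+//		SetStatus("finished").
+//		SetFinishedAt(time.Now()).
+//		Save(ctx)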
+func (_u *UsageCleanupTaskUpdate) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UsageCleanupTaskUpdate) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UsageCleanupTaskUpdate) SetStatus(v string) *UsageCleanupTaskUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableStatus(v *string) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetFilters sets the "filters" field. +func (_u *UsageCleanupTaskUpdate) SetFilters(v json.RawMessage) *UsageCleanupTaskUpdate { + _u.mutation.SetFilters(v) + return _u +} + +// AppendFilters appends value to the "filters" field. +func (_u *UsageCleanupTaskUpdate) AppendFilters(v json.RawMessage) *UsageCleanupTaskUpdate { + _u.mutation.AppendFilters(v) + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *UsageCleanupTaskUpdate) SetCreatedBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCreatedBy(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *UsageCleanupTaskUpdate) AddCreatedBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddCreatedBy(v) + return _u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdate) SetDeletedRows(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetDeletedRows() + _u.mutation.SetDeletedRows(v) + return _u +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableDeletedRows(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetDeletedRows(*v) + } + return _u +} + +// AddDeletedRows adds value to the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdate) AddDeletedRows(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddDeletedRows(v) + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *UsageCleanupTaskUpdate) SetErrorMessage(v string) *UsageCleanupTaskUpdate { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableErrorMessage(v *string) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *UsageCleanupTaskUpdate) ClearErrorMessage() *UsageCleanupTaskUpdate { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetCanceledBy sets the "canceled_by" field. +func (_u *UsageCleanupTaskUpdate) SetCanceledBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.ResetCanceledBy() + _u.mutation.SetCanceledBy(v) + return _u +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCanceledBy(v *int64) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCanceledBy(*v) + } + return _u +} + +// AddCanceledBy adds value to the "canceled_by" field. 
+func (_u *UsageCleanupTaskUpdate) AddCanceledBy(v int64) *UsageCleanupTaskUpdate { + _u.mutation.AddCanceledBy(v) + return _u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (_u *UsageCleanupTaskUpdate) ClearCanceledBy() *UsageCleanupTaskUpdate { + _u.mutation.ClearCanceledBy() + return _u +} + +// SetCanceledAt sets the "canceled_at" field. +func (_u *UsageCleanupTaskUpdate) SetCanceledAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetCanceledAt(v) + return _u +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetCanceledAt(*v) + } + return _u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (_u *UsageCleanupTaskUpdate) ClearCanceledAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearCanceledAt() + return _u +} + +// SetStartedAt sets the "started_at" field. +func (_u *UsageCleanupTaskUpdate) SetStartedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *UsageCleanupTaskUpdate) ClearStartedAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *UsageCleanupTaskUpdate) SetFinishedAt(v time.Time) *UsageCleanupTaskUpdate { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdate) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskUpdate { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *UsageCleanupTaskUpdate) ClearFinishedAt() *UsageCleanupTaskUpdate { + _u.mutation.ClearFinishedAt() + return _u +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_u *UsageCleanupTaskUpdate) Mutation() *UsageCleanupTaskMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UsageCleanupTaskUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UsageCleanupTaskUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UsageCleanupTaskUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UsageCleanupTaskUpdate) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + return nil +} + +func (_u *UsageCleanupTaskUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedFilters(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, usagecleanuptask.FieldFilters, value) + }) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedDeletedRows(); ok { + _spec.AddField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(usagecleanuptask.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCanceledBy(); ok { + _spec.AddField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if _u.mutation.CanceledByCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledBy, field.TypeInt64) + } + if value, ok := _u.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + } + if _u.mutation.CanceledAtCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledAt, field.TypeTime) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldFinishedAt, field.TypeTime) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagecleanuptask.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UsageCleanupTaskUpdateOne is the builder for 
updating a single UsageCleanupTask entity. +type UsageCleanupTaskUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UsageCleanupTaskMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetUpdatedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UsageCleanupTaskUpdateOne) SetStatus(v string) *UsageCleanupTaskUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableStatus(v *string) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetFilters sets the "filters" field. +func (_u *UsageCleanupTaskUpdateOne) SetFilters(v json.RawMessage) *UsageCleanupTaskUpdateOne { + _u.mutation.SetFilters(v) + return _u +} + +// AppendFilters appends value to the "filters" field. +func (_u *UsageCleanupTaskUpdateOne) AppendFilters(v json.RawMessage) *UsageCleanupTaskUpdateOne { + _u.mutation.AppendFilters(v) + return _u +} + +// SetCreatedBy sets the "created_by" field. +func (_u *UsageCleanupTaskUpdateOne) SetCreatedBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableCreatedBy(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *UsageCleanupTaskUpdateOne) AddCreatedBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddCreatedBy(v) + return _u +} + +// SetDeletedRows sets the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdateOne) SetDeletedRows(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetDeletedRows() + _u.mutation.SetDeletedRows(v) + return _u +} + +// SetNillableDeletedRows sets the "deleted_rows" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableDeletedRows(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetDeletedRows(*v) + } + return _u +} + +// AddDeletedRows adds value to the "deleted_rows" field. +func (_u *UsageCleanupTaskUpdateOne) AddDeletedRows(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddDeletedRows(v) + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *UsageCleanupTaskUpdateOne) SetErrorMessage(v string) *UsageCleanupTaskUpdateOne { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableErrorMessage(v *string) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *UsageCleanupTaskUpdateOne) ClearErrorMessage() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetCanceledBy sets the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) SetCanceledBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.ResetCanceledBy() + _u.mutation.SetCanceledBy(v) + return _u +} + +// SetNillableCanceledBy sets the "canceled_by" field if the given value is not nil. 
+func (_u *UsageCleanupTaskUpdateOne) SetNillableCanceledBy(v *int64) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCanceledBy(*v) + } + return _u +} + +// AddCanceledBy adds value to the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) AddCanceledBy(v int64) *UsageCleanupTaskUpdateOne { + _u.mutation.AddCanceledBy(v) + return _u +} + +// ClearCanceledBy clears the value of the "canceled_by" field. +func (_u *UsageCleanupTaskUpdateOne) ClearCanceledBy() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearCanceledBy() + return _u +} + +// SetCanceledAt sets the "canceled_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetCanceledAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetCanceledAt(v) + return _u +} + +// SetNillableCanceledAt sets the "canceled_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableCanceledAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetCanceledAt(*v) + } + return _u +} + +// ClearCanceledAt clears the value of the "canceled_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearCanceledAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearCanceledAt() + return _u +} + +// SetStartedAt sets the "started_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetStartedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableStartedAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearStartedAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *UsageCleanupTaskUpdateOne) SetFinishedAt(v time.Time) *UsageCleanupTaskUpdateOne { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *UsageCleanupTaskUpdateOne) SetNillableFinishedAt(v *time.Time) *UsageCleanupTaskUpdateOne { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *UsageCleanupTaskUpdateOne) ClearFinishedAt() *UsageCleanupTaskUpdateOne { + _u.mutation.ClearFinishedAt() + return _u +} + +// Mutation returns the UsageCleanupTaskMutation object of the builder. +func (_u *UsageCleanupTaskUpdateOne) Mutation() *UsageCleanupTaskMutation { + return _u.mutation +} + +// Where appends a list predicates to the UsageCleanupTaskUpdate builder. +func (_u *UsageCleanupTaskUpdateOne) Where(ps ...predicate.UsageCleanupTask) *UsageCleanupTaskUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UsageCleanupTaskUpdateOne) Select(field string, fields ...string) *UsageCleanupTaskUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UsageCleanupTask entity. +func (_u *UsageCleanupTaskUpdateOne) Save(ctx context.Context) (*UsageCleanupTask, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *UsageCleanupTaskUpdateOne) SaveX(ctx context.Context) *UsageCleanupTask { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UsageCleanupTaskUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UsageCleanupTaskUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UsageCleanupTaskUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usagecleanuptask.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UsageCleanupTaskUpdateOne) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usagecleanuptask.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UsageCleanupTask.status": %w`, err)} + } + } + return nil +} + +func (_u *UsageCleanupTaskUpdateOne) sqlSave(ctx context.Context) (_node *UsageCleanupTask, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagecleanuptask.Table, usagecleanuptask.Columns, sqlgraph.NewFieldSpec(usagecleanuptask.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UsageCleanupTask.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usagecleanuptask.FieldID) + for _, f := range fields { + if !usagecleanuptask.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != usagecleanuptask.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usagecleanuptask.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usagecleanuptask.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Filters(); ok { + _spec.SetField(usagecleanuptask.FieldFilters, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedFilters(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, usagecleanuptask.FieldFilters, value) + }) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(usagecleanuptask.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.DeletedRows(); ok { + _spec.SetField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedDeletedRows(); ok { + _spec.AddField(usagecleanuptask.FieldDeletedRows, field.TypeInt64, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(usagecleanuptask.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(usagecleanuptask.FieldErrorMessage, field.TypeString) + } + if value, 
ok := _u.mutation.CanceledBy(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCanceledBy(); ok { + _spec.AddField(usagecleanuptask.FieldCanceledBy, field.TypeInt64, value) + } + if _u.mutation.CanceledByCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledBy, field.TypeInt64) + } + if value, ok := _u.mutation.CanceledAt(); ok { + _spec.SetField(usagecleanuptask.FieldCanceledAt, field.TypeTime, value) + } + if _u.mutation.CanceledAtCleared() { + _spec.ClearField(usagecleanuptask.FieldCanceledAt, field.TypeTime) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(usagecleanuptask.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(usagecleanuptask.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(usagecleanuptask.FieldFinishedAt, field.TypeTime) + } + _node = &UsageCleanupTask{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagecleanuptask.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/user.go b/backend/ent/user.go index 0b9a48cc..2435aa1b 100644 --- a/backend/ent/user.go +++ b/backend/ent/user.go @@ -39,6 +39,12 @@ type User struct { Username string `json:"username,omitempty"` // Notes holds the value of the "notes" field. Notes string `json:"notes,omitempty"` + // TotpSecretEncrypted holds the value of the "totp_secret_encrypted" field. + TotpSecretEncrypted *string `json:"totp_secret_encrypted,omitempty"` + // TotpEnabled holds the value of the "totp_enabled" field. + TotpEnabled bool `json:"totp_enabled,omitempty"` + // TotpEnabledAt holds the value of the "totp_enabled_at" field. + TotpEnabledAt *time.Time `json:"totp_enabled_at,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the UserQuery when eager-loading is set. Edges UserEdges `json:"edges"` @@ -55,6 +61,8 @@ type UserEdges struct { Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` // AssignedSubscriptions holds the value of the assigned_subscriptions edge. AssignedSubscriptions []*UserSubscription `json:"assigned_subscriptions,omitempty"` + // AnnouncementReads holds the value of the announcement_reads edge. + AnnouncementReads []*AnnouncementRead `json:"announcement_reads,omitempty"` // AllowedGroups holds the value of the allowed_groups edge. AllowedGroups []*Group `json:"allowed_groups,omitempty"` // UsageLogs holds the value of the usage_logs edge. @@ -67,7 +75,7 @@ type UserEdges struct { UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. 
- loadedTypes [9]bool + loadedTypes [10]bool } // APIKeysOrErr returns the APIKeys value or an error if the edge @@ -106,10 +114,19 @@ func (e UserEdges) AssignedSubscriptionsOrErr() ([]*UserSubscription, error) { return nil, &NotLoadedError{edge: "assigned_subscriptions"} } +// AnnouncementReadsOrErr returns the AnnouncementReads value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AnnouncementReadsOrErr() ([]*AnnouncementRead, error) { + if e.loadedTypes[4] { + return e.AnnouncementReads, nil + } + return nil, &NotLoadedError{edge: "announcement_reads"} +} + // AllowedGroupsOrErr returns the AllowedGroups value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { - if e.loadedTypes[4] { + if e.loadedTypes[5] { return e.AllowedGroups, nil } return nil, &NotLoadedError{edge: "allowed_groups"} @@ -118,7 +135,7 @@ func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { // UsageLogsOrErr returns the UsageLogs value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) UsageLogsOrErr() ([]*UsageLog, error) { - if e.loadedTypes[5] { + if e.loadedTypes[6] { return e.UsageLogs, nil } return nil, &NotLoadedError{edge: "usage_logs"} @@ -127,7 +144,7 @@ func (e UserEdges) UsageLogsOrErr() ([]*UsageLog, error) { // AttributeValuesOrErr returns the AttributeValues value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { - if e.loadedTypes[6] { + if e.loadedTypes[7] { return e.AttributeValues, nil } return nil, &NotLoadedError{edge: "attribute_values"} @@ -136,7 +153,7 @@ func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { // PromoCodeUsagesOrErr returns the PromoCodeUsages value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { - if e.loadedTypes[7] { + if e.loadedTypes[8] { return e.PromoCodeUsages, nil } return nil, &NotLoadedError{edge: "promo_code_usages"} @@ -145,7 +162,7 @@ func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { // UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge // was not loaded in eager-loading. 
func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { - if e.loadedTypes[8] { + if e.loadedTypes[9] { return e.UserAllowedGroups, nil } return nil, &NotLoadedError{edge: "user_allowed_groups"} @@ -156,13 +173,15 @@ func (*User) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case user.FieldTotpEnabled: + values[i] = new(sql.NullBool) case user.FieldBalance: values[i] = new(sql.NullFloat64) case user.FieldID, user.FieldConcurrency: values[i] = new(sql.NullInt64) - case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes: + case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes, user.FieldTotpSecretEncrypted: values[i] = new(sql.NullString) - case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt: + case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt, user.FieldTotpEnabledAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -252,6 +271,26 @@ func (_m *User) assignValues(columns []string, values []any) error { } else if value.Valid { _m.Notes = value.String } + case user.FieldTotpSecretEncrypted: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field totp_secret_encrypted", values[i]) + } else if value.Valid { + _m.TotpSecretEncrypted = new(string) + *_m.TotpSecretEncrypted = value.String + } + case user.FieldTotpEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field totp_enabled", values[i]) + } else if value.Valid { + _m.TotpEnabled = value.Bool + } + case user.FieldTotpEnabledAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field totp_enabled_at", values[i]) + } else if value.Valid { + _m.TotpEnabledAt = new(time.Time) + *_m.TotpEnabledAt = value.Time + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -285,6 +324,11 @@ func (_m *User) QueryAssignedSubscriptions() *UserSubscriptionQuery { return NewUserClient(_m.config).QueryAssignedSubscriptions(_m) } +// QueryAnnouncementReads queries the "announcement_reads" edge of the User entity. +func (_m *User) QueryAnnouncementReads() *AnnouncementReadQuery { + return NewUserClient(_m.config).QueryAnnouncementReads(_m) +} + // QueryAllowedGroups queries the "allowed_groups" edge of the User entity. func (_m *User) QueryAllowedGroups() *GroupQuery { return NewUserClient(_m.config).QueryAllowedGroups(_m) @@ -367,6 +411,19 @@ func (_m *User) String() string { builder.WriteString(", ") builder.WriteString("notes=") builder.WriteString(_m.Notes) + builder.WriteString(", ") + if v := _m.TotpSecretEncrypted; v != nil { + builder.WriteString("totp_secret_encrypted=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("totp_enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.TotpEnabled)) + builder.WriteString(", ") + if v := _m.TotpEnabledAt; v != nil { + builder.WriteString("totp_enabled_at=") + builder.WriteString(v.Format(time.ANSIC)) + } builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go index 1be1d871..ae9418ff 100644 --- a/backend/ent/user/user.go +++ b/backend/ent/user/user.go @@ -37,6 +37,12 @@ const ( FieldUsername = "username" // FieldNotes holds the string denoting the notes field in the database. 
FieldNotes = "notes" + // FieldTotpSecretEncrypted holds the string denoting the totp_secret_encrypted field in the database. + FieldTotpSecretEncrypted = "totp_secret_encrypted" + // FieldTotpEnabled holds the string denoting the totp_enabled field in the database. + FieldTotpEnabled = "totp_enabled" + // FieldTotpEnabledAt holds the string denoting the totp_enabled_at field in the database. + FieldTotpEnabledAt = "totp_enabled_at" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. @@ -45,6 +51,8 @@ const ( EdgeSubscriptions = "subscriptions" // EdgeAssignedSubscriptions holds the string denoting the assigned_subscriptions edge name in mutations. EdgeAssignedSubscriptions = "assigned_subscriptions" + // EdgeAnnouncementReads holds the string denoting the announcement_reads edge name in mutations. + EdgeAnnouncementReads = "announcement_reads" // EdgeAllowedGroups holds the string denoting the allowed_groups edge name in mutations. EdgeAllowedGroups = "allowed_groups" // EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations. @@ -85,6 +93,13 @@ const ( AssignedSubscriptionsInverseTable = "user_subscriptions" // AssignedSubscriptionsColumn is the table column denoting the assigned_subscriptions relation/edge. AssignedSubscriptionsColumn = "assigned_by" + // AnnouncementReadsTable is the table that holds the announcement_reads relation/edge. + AnnouncementReadsTable = "announcement_reads" + // AnnouncementReadsInverseTable is the table name for the AnnouncementRead entity. + // It exists in this package in order to avoid circular dependency with the "announcementread" package. + AnnouncementReadsInverseTable = "announcement_reads" + // AnnouncementReadsColumn is the table column denoting the announcement_reads relation/edge. + AnnouncementReadsColumn = "user_id" // AllowedGroupsTable is the table that holds the allowed_groups relation/edge. The primary key declared below. AllowedGroupsTable = "user_allowed_groups" // AllowedGroupsInverseTable is the table name for the Group entity. @@ -134,6 +149,9 @@ var Columns = []string{ FieldStatus, FieldUsername, FieldNotes, + FieldTotpSecretEncrypted, + FieldTotpEnabled, + FieldTotpEnabledAt, } var ( @@ -188,6 +206,8 @@ var ( UsernameValidator func(string) error // DefaultNotes holds the default value on creation for the "notes" field. DefaultNotes string + // DefaultTotpEnabled holds the default value on creation for the "totp_enabled" field. + DefaultTotpEnabled bool ) // OrderOption defines the ordering options for the User queries. @@ -253,6 +273,21 @@ func ByNotes(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldNotes, opts...).ToFunc() } +// ByTotpSecretEncrypted orders the results by the totp_secret_encrypted field. +func ByTotpSecretEncrypted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpSecretEncrypted, opts...).ToFunc() +} + +// ByTotpEnabled orders the results by the totp_enabled field. +func ByTotpEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpEnabled, opts...).ToFunc() +} + +// ByTotpEnabledAt orders the results by the totp_enabled_at field. +func ByTotpEnabledAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotpEnabledAt, opts...).ToFunc() +} + // ByAPIKeysCount orders the results by api_keys count. 
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { @@ -309,6 +344,20 @@ func ByAssignedSubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOp } } +// ByAnnouncementReadsCount orders the results by announcement_reads count. +func ByAnnouncementReadsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAnnouncementReadsStep(), opts...) + } +} + +// ByAnnouncementReads orders the results by announcement_reads terms. +func ByAnnouncementReads(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAnnouncementReadsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + // ByAllowedGroupsCount orders the results by allowed_groups count. func ByAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { @@ -406,6 +455,13 @@ func newAssignedSubscriptionsStep() *sqlgraph.Step { sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), ) } +func newAnnouncementReadsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AnnouncementReadsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AnnouncementReadsTable, AnnouncementReadsColumn), + ) +} func newAllowedGroupsStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go index 6a460f10..1de61037 100644 --- a/backend/ent/user/where.go +++ b/backend/ent/user/where.go @@ -110,6 +110,21 @@ func Notes(v string) predicate.User { return predicate.User(sql.FieldEQ(FieldNotes, v)) } +// TotpSecretEncrypted applies equality check predicate on the "totp_secret_encrypted" field. It's identical to TotpSecretEncryptedEQ. +func TotpSecretEncrypted(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpEnabled applies equality check predicate on the "totp_enabled" field. It's identical to TotpEnabledEQ. +func TotpEnabled(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledAt applies equality check predicate on the "totp_enabled_at" field. It's identical to TotpEnabledAtEQ. +func TotpEnabledAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.User { return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) @@ -710,6 +725,141 @@ func NotesContainsFold(v string) predicate.User { return predicate.User(sql.FieldContainsFold(FieldNotes, v)) } +// TotpSecretEncryptedEQ applies the EQ predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedNEQ applies the NEQ predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedIn applies the In predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldTotpSecretEncrypted, vs...)) +} + +// TotpSecretEncryptedNotIn applies the NotIn predicate on the "totp_secret_encrypted" field. 
+func TotpSecretEncryptedNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldTotpSecretEncrypted, vs...)) +} + +// TotpSecretEncryptedGT applies the GT predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedGTE applies the GTE predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedLT applies the LT predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedLTE applies the LTE predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedContains applies the Contains predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedHasPrefix applies the HasPrefix predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedHasSuffix applies the HasSuffix predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedIsNil applies the IsNil predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldTotpSecretEncrypted)) +} + +// TotpSecretEncryptedNotNil applies the NotNil predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldTotpSecretEncrypted)) +} + +// TotpSecretEncryptedEqualFold applies the EqualFold predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldTotpSecretEncrypted, v)) +} + +// TotpSecretEncryptedContainsFold applies the ContainsFold predicate on the "totp_secret_encrypted" field. +func TotpSecretEncryptedContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldTotpSecretEncrypted, v)) +} + +// TotpEnabledEQ applies the EQ predicate on the "totp_enabled" field. +func TotpEnabledEQ(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledNEQ applies the NEQ predicate on the "totp_enabled" field. +func TotpEnabledNEQ(v bool) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpEnabled, v)) +} + +// TotpEnabledAtEQ applies the EQ predicate on the "totp_enabled_at" field. +func TotpEnabledAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtNEQ applies the NEQ predicate on the "totp_enabled_at" field. +func TotpEnabledAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtIn applies the In predicate on the "totp_enabled_at" field. 
+func TotpEnabledAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldTotpEnabledAt, vs...)) +} + +// TotpEnabledAtNotIn applies the NotIn predicate on the "totp_enabled_at" field. +func TotpEnabledAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldTotpEnabledAt, vs...)) +} + +// TotpEnabledAtGT applies the GT predicate on the "totp_enabled_at" field. +func TotpEnabledAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtGTE applies the GTE predicate on the "totp_enabled_at" field. +func TotpEnabledAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtLT applies the LT predicate on the "totp_enabled_at" field. +func TotpEnabledAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtLTE applies the LTE predicate on the "totp_enabled_at" field. +func TotpEnabledAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldTotpEnabledAt, v)) +} + +// TotpEnabledAtIsNil applies the IsNil predicate on the "totp_enabled_at" field. +func TotpEnabledAtIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldTotpEnabledAt)) +} + +// TotpEnabledAtNotNil applies the NotNil predicate on the "totp_enabled_at" field. +func TotpEnabledAtNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldTotpEnabledAt)) +} + // HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. func HasAPIKeys() predicate.User { return predicate.User(func(s *sql.Selector) { @@ -802,6 +952,29 @@ func HasAssignedSubscriptionsWith(preds ...predicate.UserSubscription) predicate }) } +// HasAnnouncementReads applies the HasEdge predicate on the "announcement_reads" edge. +func HasAnnouncementReads() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AnnouncementReadsTable, AnnouncementReadsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAnnouncementReadsWith applies the HasEdge predicate on the "announcement_reads" edge with a given conditions (other predicates). +func HasAnnouncementReadsWith(preds ...predicate.AnnouncementRead) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAnnouncementReadsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // HasAllowedGroups applies the HasEdge predicate on the "allowed_groups" edge. func HasAllowedGroups() predicate.User { return predicate.User(func(s *sql.Selector) { diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go index e12e476c..f862a580 100644 --- a/backend/ent/user_create.go +++ b/backend/ent/user_create.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -167,6 +168,48 @@ func (_c *UserCreate) SetNillableNotes(v *string) *UserCreate { return _c } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. 
+func (_c *UserCreate) SetTotpSecretEncrypted(v string) *UserCreate { + _c.mutation.SetTotpSecretEncrypted(v) + return _c +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpSecretEncrypted(v *string) *UserCreate { + if v != nil { + _c.SetTotpSecretEncrypted(*v) + } + return _c +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (_c *UserCreate) SetTotpEnabled(v bool) *UserCreate { + _c.mutation.SetTotpEnabled(v) + return _c +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpEnabled(v *bool) *UserCreate { + if v != nil { + _c.SetTotpEnabled(*v) + } + return _c +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_c *UserCreate) SetTotpEnabledAt(v time.Time) *UserCreate { + _c.mutation.SetTotpEnabledAt(v) + return _c +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableTotpEnabledAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetTotpEnabledAt(*v) + } + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate { _c.mutation.AddAPIKeyIDs(ids...) @@ -227,6 +270,21 @@ func (_c *UserCreate) AddAssignedSubscriptions(v ...*UserSubscription) *UserCrea return _c.AddAssignedSubscriptionIDs(ids...) } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (_c *UserCreate) AddAnnouncementReadIDs(ids ...int64) *UserCreate { + _c.mutation.AddAnnouncementReadIDs(ids...) + return _c +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_c *UserCreate) AddAnnouncementReads(v ...*AnnouncementRead) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_c *UserCreate) AddAllowedGroupIDs(ids ...int64) *UserCreate { _c.mutation.AddAllowedGroupIDs(ids...) 
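A minimal usage sketch of the new create-builder TOTP setters added above, assuming a generated *ent.Client named client; the elided required fields and the encryptedSecret argument are placeholders, and this block is illustration only, not part of the generated patch. Omitting SetTotpEnabled falls back to user.DefaultTotpEnabled via the defaults() hunk just below.

package example

import (
	"context"
	"time"

	"github.com/Wei-Shaw/sub2api/ent"
)

// createUserWithTOTP enables TOTP at creation time using the setters
// introduced in this diff.
func createUserWithTOTP(ctx context.Context, client *ent.Client, encryptedSecret string) (*ent.User, error) {
	return client.User.Create().
		// ...required fields (email, password hash, ...) set as usual...
		SetTotpSecretEncrypted(encryptedSecret). // nullable string column, stored encrypted
		SetTotpEnabled(true).                    // otherwise defaults via user.DefaultTotpEnabled
		SetTotpEnabledAt(time.Now()).            // nullable timestamp
		Save(ctx)
}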
@@ -362,6 +420,10 @@ func (_c *UserCreate) defaults() error { v := user.DefaultNotes _c.mutation.SetNotes(v) } + if _, ok := _c.mutation.TotpEnabled(); !ok { + v := user.DefaultTotpEnabled + _c.mutation.SetTotpEnabled(v) + } return nil } @@ -422,6 +484,9 @@ func (_c *UserCreate) check() error { if _, ok := _c.mutation.Notes(); !ok { return &ValidationError{Name: "notes", err: errors.New(`ent: missing required field "User.notes"`)} } + if _, ok := _c.mutation.TotpEnabled(); !ok { + return &ValidationError{Name: "totp_enabled", err: errors.New(`ent: missing required field "User.totp_enabled"`)} + } return nil } @@ -493,6 +558,18 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { _spec.SetField(user.FieldNotes, field.TypeString, value) _node.Notes = value } + if value, ok := _c.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + _node.TotpSecretEncrypted = &value + } + if value, ok := _c.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + _node.TotpEnabled = value + } + if value, ok := _c.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + _node.TotpEnabledAt = &value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -557,6 +634,22 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := _c.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } if nodes := _c.mutation.AllowedGroupsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -815,6 +908,54 @@ func (u *UserUpsert) UpdateNotes() *UserUpsert { return u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsert) SetTotpSecretEncrypted(v string) *UserUpsert { + u.Set(user.FieldTotpSecretEncrypted, v) + return u +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsert) UpdateTotpSecretEncrypted() *UserUpsert { + u.SetExcluded(user.FieldTotpSecretEncrypted) + return u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsert) ClearTotpSecretEncrypted() *UserUpsert { + u.SetNull(user.FieldTotpSecretEncrypted) + return u +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsert) SetTotpEnabled(v bool) *UserUpsert { + u.Set(user.FieldTotpEnabled, v) + return u +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. +func (u *UserUpsert) UpdateTotpEnabled() *UserUpsert { + u.SetExcluded(user.FieldTotpEnabled) + return u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsert) SetTotpEnabledAt(v time.Time) *UserUpsert { + u.Set(user.FieldTotpEnabledAt, v) + return u +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. 
+func (u *UserUpsert) UpdateTotpEnabledAt() *UserUpsert { + u.SetExcluded(user.FieldTotpEnabledAt) + return u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsert) ClearTotpEnabledAt() *UserUpsert { + u.SetNull(user.FieldTotpEnabledAt) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -1021,6 +1162,62 @@ func (u *UserUpsertOne) UpdateNotes() *UserUpsertOne { }) } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsertOne) SetTotpSecretEncrypted(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpSecretEncrypted(v) + }) +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpSecretEncrypted() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpSecretEncrypted() + }) +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsertOne) ClearTotpSecretEncrypted() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearTotpSecretEncrypted() + }) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsertOne) SetTotpEnabled(v bool) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabled(v) + }) +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpEnabled() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabled() + }) +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsertOne) SetTotpEnabledAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabledAt(v) + }) +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateTotpEnabledAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabledAt() + }) +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsertOne) ClearTotpEnabledAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearTotpEnabledAt() + }) +} + // Exec executes the query. func (u *UserUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -1393,6 +1590,62 @@ func (u *UserUpsertBulk) UpdateNotes() *UserUpsertBulk { }) } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (u *UserUpsertBulk) SetTotpSecretEncrypted(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpSecretEncrypted(v) + }) +} + +// UpdateTotpSecretEncrypted sets the "totp_secret_encrypted" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateTotpSecretEncrypted() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpSecretEncrypted() + }) +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (u *UserUpsertBulk) ClearTotpSecretEncrypted() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearTotpSecretEncrypted() + }) +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (u *UserUpsertBulk) SetTotpEnabled(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabled(v) + }) +} + +// UpdateTotpEnabled sets the "totp_enabled" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdateTotpEnabled() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabled() + }) +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (u *UserUpsertBulk) SetTotpEnabledAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetTotpEnabledAt(v) + }) +} + +// UpdateTotpEnabledAt sets the "totp_enabled_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateTotpEnabledAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateTotpEnabledAt() + }) +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (u *UserUpsertBulk) ClearTotpEnabledAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearTotpEnabledAt() + }) +} + // Exec executes the query. func (u *UserUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go index e66e2dc8..4b56e16f 100644 --- a/backend/ent/user_query.go +++ b/backend/ent/user_query.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -36,6 +37,7 @@ type UserQuery struct { withRedeemCodes *RedeemCodeQuery withSubscriptions *UserSubscriptionQuery withAssignedSubscriptions *UserSubscriptionQuery + withAnnouncementReads *AnnouncementReadQuery withAllowedGroups *GroupQuery withUsageLogs *UsageLogQuery withAttributeValues *UserAttributeValueQuery @@ -166,6 +168,28 @@ func (_q *UserQuery) QueryAssignedSubscriptions() *UserSubscriptionQuery { return query } +// QueryAnnouncementReads chains the current query on the "announcement_reads" edge. +func (_q *UserQuery) QueryAnnouncementReads() *AnnouncementReadQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(announcementread.Table, announcementread.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AnnouncementReadsTable, user.AnnouncementReadsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + // QueryAllowedGroups chains the current query on the "allowed_groups" edge. func (_q *UserQuery) QueryAllowedGroups() *GroupQuery { query := (&GroupClient{config: _q.config}).Query() @@ -472,6 +496,7 @@ func (_q *UserQuery) Clone() *UserQuery { withRedeemCodes: _q.withRedeemCodes.Clone(), withSubscriptions: _q.withSubscriptions.Clone(), withAssignedSubscriptions: _q.withAssignedSubscriptions.Clone(), + withAnnouncementReads: _q.withAnnouncementReads.Clone(), withAllowedGroups: _q.withAllowedGroups.Clone(), withUsageLogs: _q.withUsageLogs.Clone(), withAttributeValues: _q.withAttributeValues.Clone(), @@ -527,6 +552,17 @@ func (_q *UserQuery) WithAssignedSubscriptions(opts ...func(*UserSubscriptionQue return _q } +// WithAnnouncementReads tells the query-builder to eager-load the nodes that are connected to +// the "announcement_reads" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *UserQuery) WithAnnouncementReads(opts ...func(*AnnouncementReadQuery)) *UserQuery { + query := (&AnnouncementReadClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAnnouncementReads = query + return _q +} + // WithAllowedGroups tells the query-builder to eager-load the nodes that are connected to // the "allowed_groups" edge. The optional arguments are used to configure the query builder of the edge. func (_q *UserQuery) WithAllowedGroups(opts ...func(*GroupQuery)) *UserQuery { @@ -660,11 +696,12 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e var ( nodes = []*User{} _spec = _q.querySpec() - loadedTypes = [9]bool{ + loadedTypes = [10]bool{ _q.withAPIKeys != nil, _q.withRedeemCodes != nil, _q.withSubscriptions != nil, _q.withAssignedSubscriptions != nil, + _q.withAnnouncementReads != nil, _q.withAllowedGroups != nil, _q.withUsageLogs != nil, _q.withAttributeValues != nil, @@ -723,6 +760,13 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e return nil, err } } + if query := _q.withAnnouncementReads; query != nil { + if err := _q.loadAnnouncementReads(ctx, query, nodes, + func(n *User) { n.Edges.AnnouncementReads = []*AnnouncementRead{} }, + func(n *User, e *AnnouncementRead) { n.Edges.AnnouncementReads = append(n.Edges.AnnouncementReads, e) }); err != nil { + return nil, err + } + } if query := _q.withAllowedGroups; query != nil { if err := _q.loadAllowedGroups(ctx, query, nodes, func(n *User) { n.Edges.AllowedGroups = []*Group{} }, @@ -887,6 +931,36 @@ func (_q *UserQuery) loadAssignedSubscriptions(ctx context.Context, query *UserS } return nil } +func (_q *UserQuery) loadAnnouncementReads(ctx context.Context, query *AnnouncementReadQuery, nodes []*User, init func(*User), assign func(*User, *AnnouncementRead)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(announcementread.FieldUserID) + } + query.Where(predicate.AnnouncementRead(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AnnouncementReadsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} func (_q *UserQuery) loadAllowedGroups(ctx context.Context, query *GroupQuery, nodes []*User, init func(*User), assign func(*User, *Group)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[int64]*User) diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go index cf189fea..80222c92 100644 --- a/backend/ent/user_update.go +++ b/backend/ent/user_update.go @@ -11,6 +11,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/announcementread" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" @@ -187,6 +188,60 @@ func (_u *UserUpdate) SetNillableNotes(v *string) *UserUpdate { return _u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. 
+func (_u *UserUpdate) SetTotpSecretEncrypted(v string) *UserUpdate { + _u.mutation.SetTotpSecretEncrypted(v) + return _u +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpSecretEncrypted(v *string) *UserUpdate { + if v != nil { + _u.SetTotpSecretEncrypted(*v) + } + return _u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (_u *UserUpdate) ClearTotpSecretEncrypted() *UserUpdate { + _u.mutation.ClearTotpSecretEncrypted() + return _u +} + +// SetTotpEnabled sets the "totp_enabled" field. +func (_u *UserUpdate) SetTotpEnabled(v bool) *UserUpdate { + _u.mutation.SetTotpEnabled(v) + return _u +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpEnabled(v *bool) *UserUpdate { + if v != nil { + _u.SetTotpEnabled(*v) + } + return _u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_u *UserUpdate) SetTotpEnabledAt(v time.Time) *UserUpdate { + _u.mutation.SetTotpEnabledAt(v) + return _u +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_u *UserUpdate) SetNillableTotpEnabledAt(v *time.Time) *UserUpdate { + if v != nil { + _u.SetTotpEnabledAt(*v) + } + return _u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (_u *UserUpdate) ClearTotpEnabledAt() *UserUpdate { + _u.mutation.ClearTotpEnabledAt() + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate { _u.mutation.AddAPIKeyIDs(ids...) @@ -247,6 +302,21 @@ func (_u *UserUpdate) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpda return _u.AddAssignedSubscriptionIDs(ids...) } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (_u *UserUpdate) AddAnnouncementReadIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAnnouncementReadIDs(ids...) + return _u +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdate) AddAnnouncementReads(v ...*AnnouncementRead) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_u *UserUpdate) AddAllowedGroupIDs(ids ...int64) *UserUpdate { _u.mutation.AddAllowedGroupIDs(ids...) @@ -396,6 +466,27 @@ func (_u *UserUpdate) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserU return _u.RemoveAssignedSubscriptionIDs(ids...) } +// ClearAnnouncementReads clears all "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdate) ClearAnnouncementReads() *UserUpdate { + _u.mutation.ClearAnnouncementReads() + return _u +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to AnnouncementRead entities by IDs. +func (_u *UserUpdate) RemoveAnnouncementReadIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAnnouncementReadIDs(ids...) + return _u +} + +// RemoveAnnouncementReads removes "announcement_reads" edges to AnnouncementRead entities. +func (_u *UserUpdate) RemoveAnnouncementReads(v ...*AnnouncementRead) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAnnouncementReadIDs(ids...) 
+} + // ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. func (_u *UserUpdate) ClearAllowedGroups() *UserUpdate { _u.mutation.ClearAllowedGroups() @@ -603,6 +694,21 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.Notes(); ok { _spec.SetField(user.FieldNotes, field.TypeString, value) } + if value, ok := _u.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + } + if _u.mutation.TotpSecretEncryptedCleared() { + _spec.ClearField(user.FieldTotpSecretEncrypted, field.TypeString) + } + if value, ok := _u.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + } + if _u.mutation.TotpEnabledAtCleared() { + _spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -783,6 +889,51 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAnnouncementReadsIDs(); len(nodes) > 0 && !_u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _u.mutation.AllowedGroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -1147,6 +1298,60 @@ func (_u *UserUpdateOne) SetNillableNotes(v *string) *UserUpdateOne { return _u } +// SetTotpSecretEncrypted sets the "totp_secret_encrypted" field. +func (_u *UserUpdateOne) SetTotpSecretEncrypted(v string) *UserUpdateOne { + _u.mutation.SetTotpSecretEncrypted(v) + return _u +} + +// SetNillableTotpSecretEncrypted sets the "totp_secret_encrypted" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpSecretEncrypted(v *string) *UserUpdateOne { + if v != nil { + _u.SetTotpSecretEncrypted(*v) + } + return _u +} + +// ClearTotpSecretEncrypted clears the value of the "totp_secret_encrypted" field. +func (_u *UserUpdateOne) ClearTotpSecretEncrypted() *UserUpdateOne { + _u.mutation.ClearTotpSecretEncrypted() + return _u +} + +// SetTotpEnabled sets the "totp_enabled" field. 
+func (_u *UserUpdateOne) SetTotpEnabled(v bool) *UserUpdateOne { + _u.mutation.SetTotpEnabled(v) + return _u +} + +// SetNillableTotpEnabled sets the "totp_enabled" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpEnabled(v *bool) *UserUpdateOne { + if v != nil { + _u.SetTotpEnabled(*v) + } + return _u +} + +// SetTotpEnabledAt sets the "totp_enabled_at" field. +func (_u *UserUpdateOne) SetTotpEnabledAt(v time.Time) *UserUpdateOne { + _u.mutation.SetTotpEnabledAt(v) + return _u +} + +// SetNillableTotpEnabledAt sets the "totp_enabled_at" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableTotpEnabledAt(v *time.Time) *UserUpdateOne { + if v != nil { + _u.SetTotpEnabledAt(*v) + } + return _u +} + +// ClearTotpEnabledAt clears the value of the "totp_enabled_at" field. +func (_u *UserUpdateOne) ClearTotpEnabledAt() *UserUpdateOne { + _u.mutation.ClearTotpEnabledAt() + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) @@ -1207,6 +1412,21 @@ func (_u *UserUpdateOne) AddAssignedSubscriptions(v ...*UserSubscription) *UserU return _u.AddAssignedSubscriptionIDs(ids...) } +// AddAnnouncementReadIDs adds the "announcement_reads" edge to the AnnouncementRead entity by IDs. +func (_u *UserUpdateOne) AddAnnouncementReadIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAnnouncementReadIDs(ids...) + return _u +} + +// AddAnnouncementReads adds the "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdateOne) AddAnnouncementReads(v ...*AnnouncementRead) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAnnouncementReadIDs(ids...) +} + // AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. func (_u *UserUpdateOne) AddAllowedGroupIDs(ids ...int64) *UserUpdateOne { _u.mutation.AddAllowedGroupIDs(ids...) @@ -1356,6 +1576,27 @@ func (_u *UserUpdateOne) RemoveAssignedSubscriptions(v ...*UserSubscription) *Us return _u.RemoveAssignedSubscriptionIDs(ids...) } +// ClearAnnouncementReads clears all "announcement_reads" edges to the AnnouncementRead entity. +func (_u *UserUpdateOne) ClearAnnouncementReads() *UserUpdateOne { + _u.mutation.ClearAnnouncementReads() + return _u +} + +// RemoveAnnouncementReadIDs removes the "announcement_reads" edge to AnnouncementRead entities by IDs. +func (_u *UserUpdateOne) RemoveAnnouncementReadIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAnnouncementReadIDs(ids...) + return _u +} + +// RemoveAnnouncementReads removes "announcement_reads" edges to AnnouncementRead entities. +func (_u *UserUpdateOne) RemoveAnnouncementReads(v ...*AnnouncementRead) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAnnouncementReadIDs(ids...) +} + // ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. 
func (_u *UserUpdateOne) ClearAllowedGroups() *UserUpdateOne { _u.mutation.ClearAllowedGroups() @@ -1593,6 +1834,21 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { if value, ok := _u.mutation.Notes(); ok { _spec.SetField(user.FieldNotes, field.TypeString, value) } + if value, ok := _u.mutation.TotpSecretEncrypted(); ok { + _spec.SetField(user.FieldTotpSecretEncrypted, field.TypeString, value) + } + if _u.mutation.TotpSecretEncryptedCleared() { + _spec.ClearField(user.FieldTotpSecretEncrypted, field.TypeString) + } + if value, ok := _u.mutation.TotpEnabled(); ok { + _spec.SetField(user.FieldTotpEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.TotpEnabledAt(); ok { + _spec.SetField(user.FieldTotpEnabledAt, field.TypeTime, value) + } + if _u.mutation.TotpEnabledAtCleared() { + _spec.ClearField(user.FieldTotpEnabledAt, field.TypeTime) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1773,6 +2029,51 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAnnouncementReadsIDs(); len(nodes) > 0 && !_u.mutation.AnnouncementReadsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AnnouncementReadsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AnnouncementReadsTable, + Columns: []string{user.AnnouncementReadsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _u.mutation.AllowedGroupsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, diff --git a/backend/go.mod b/backend/go.mod index 4ac6ba14..329eddfc 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,6 +1,6 @@ module github.com/Wei-Shaw/sub2api -go 1.25.5 +go 1.25.6 require ( entgo.io/ent v0.14.5 diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 85face75..84be445b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -47,6 +47,7 @@ type Config struct { Redis RedisConfig `mapstructure:"redis"` Ops OpsConfig `mapstructure:"ops"` JWT JWTConfig `mapstructure:"jwt"` + Totp TotpConfig `mapstructure:"totp"` LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"` Default DefaultConfig `mapstructure:"default"` RateLimit RateLimitConfig `mapstructure:"rate_limit"` @@ -55,6 +56,7 @@ type Config struct { APIKeyAuth APIKeyAuthCacheConfig `mapstructure:"api_key_auth_cache"` Dashboard 
DashboardCacheConfig `mapstructure:"dashboard_cache"`
 	DashboardAgg DashboardAggregationConfig `mapstructure:"dashboard_aggregation"`
+	UsageCleanup UsageCleanupConfig `mapstructure:"usage_cleanup"`
 	Concurrency ConcurrencyConfig `mapstructure:"concurrency"`
 	TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"`
 	RunMode string `mapstructure:"run_mode" yaml:"run_mode"`
@@ -267,6 +269,33 @@ type GatewayConfig struct {
 
 	// Scheduling: 账号调度相关配置
 	Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"`
+
+	// TLSFingerprint: TLS指纹伪装配置
+	TLSFingerprint TLSFingerprintConfig `mapstructure:"tls_fingerprint"`
+}
+
+// TLSFingerprintConfig TLS指纹伪装配置
+// 用于模拟 Claude CLI (Node.js) 的 TLS 握手特征,避免被识别为非官方客户端
+type TLSFingerprintConfig struct {
+	// Enabled: 是否全局启用TLS指纹功能
+	Enabled bool `mapstructure:"enabled"`
+	// Profiles: 预定义的TLS指纹配置模板
+	// key 为模板名称,如 "claude_cli_v2", "chrome_120" 等
+	Profiles map[string]TLSProfileConfig `mapstructure:"profiles"`
+}
+
+// TLSProfileConfig 单个TLS指纹模板的配置
+type TLSProfileConfig struct {
+	// Name: 模板显示名称
+	Name string `mapstructure:"name"`
+	// EnableGREASE: 是否启用GREASE扩展(Chrome使用,Node.js不使用)
+	EnableGREASE bool `mapstructure:"enable_grease"`
+	// CipherSuites: TLS加密套件列表(空则使用内置默认值)
+	CipherSuites []uint16 `mapstructure:"cipher_suites"`
+	// Curves: 椭圆曲线列表(空则使用内置默认值)
+	Curves []uint16 `mapstructure:"curves"`
+	// PointFormats: 点格式列表(空则使用内置默认值)
+	PointFormats []uint8 `mapstructure:"point_formats"`
 }
 
 // GatewaySchedulingConfig accounts scheduling configuration.
@@ -386,6 +415,8 @@ type RedisConfig struct {
 	PoolSize int `mapstructure:"pool_size"`
 	// MinIdleConns: 最小空闲连接数,保持热连接减少冷启动延迟
 	MinIdleConns int `mapstructure:"min_idle_conns"`
+	// EnableTLS: 是否启用 TLS/SSL 连接
+	EnableTLS bool `mapstructure:"enable_tls"`
 }
 
 func (r *RedisConfig) Address() string {
@@ -438,6 +469,16 @@ type JWTConfig struct {
 	ExpireHour int `mapstructure:"expire_hour"`
 }
 
+// TotpConfig TOTP 双因素认证配置
+type TotpConfig struct {
+	// EncryptionKey 用于加密 TOTP 密钥的 AES-256 密钥(32 字节原始密钥,hex 编码后为 64 个字符)
+	// 如果为空,将自动生成一个随机密钥(仅适用于开发环境)
+	EncryptionKey string `mapstructure:"encryption_key"`
+	// EncryptionKeyConfigured 标记加密密钥是否为手动配置(非自动生成)
+	// 只有手动配置了密钥才允许在管理后台启用 TOTP 功能
+	EncryptionKeyConfigured bool `mapstructure:"-"`
+}
+
 type TurnstileConfig struct {
 	Required bool `mapstructure:"required"`
 }
@@ -504,6 +545,20 @@ type DashboardAggregationRetentionConfig struct {
 	DailyDays int `mapstructure:"daily_days"`
 }
 
+// UsageCleanupConfig 使用记录清理任务配置
+type UsageCleanupConfig struct {
+	// Enabled: 是否启用清理任务执行器
+	Enabled bool `mapstructure:"enabled"`
+	// MaxRangeDays: 单次任务允许的最大时间跨度(天)
+	MaxRangeDays int `mapstructure:"max_range_days"`
+	// BatchSize: 单批删除数量
+	BatchSize int `mapstructure:"batch_size"`
+	// WorkerIntervalSeconds: 后台任务轮询间隔(秒)
+	WorkerIntervalSeconds int `mapstructure:"worker_interval_seconds"`
+	// TaskTimeoutSeconds: 单次任务最大执行时长(秒)
+	TaskTimeoutSeconds int `mapstructure:"task_timeout_seconds"`
+}
+
 func NormalizeRunMode(value string) string {
 	normalized := strings.ToLower(strings.TrimSpace(value))
 	switch normalized {
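A note on the key format while we are here: totp.encryption_key must decode to the 32 raw bytes AES-256 expects, so a valid value is 64 hex characters. A minimal sketch of generating and checking such a key (editor's illustration, not part of this patch; newTotpEncryptionKey and validateTotpEncryptionKey are hypothetical helpers — Load() in this change reuses generateJWTSecret instead):

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newTotpEncryptionKey returns a random key in the format totp.encryption_key expects.
func newTotpEncryptionKey() (string, error) {
	raw := make([]byte, 32) // 32 raw bytes for AES-256
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil // 64 hex characters
}

// validateTotpEncryptionKey checks that a configured key decodes to exactly 32 bytes.
func validateTotpEncryptionKey(key string) error {
	raw, err := hex.DecodeString(key)
	if err != nil {
		return fmt.Errorf("totp.encryption_key is not valid hex: %w", err)
	}
	if len(raw) != 32 {
		return fmt.Errorf("totp.encryption_key must decode to 32 bytes, got %d", len(raw))
	}
	return nil
}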
@@ -584,6 +639,20 @@ func Load() (*Config, error) {
 		log.Println("Warning: JWT secret auto-generated. Consider setting a fixed secret for production.")
 	}
 
+	// Auto-generate TOTP encryption key if not set (32 bytes = 64 hex chars for AES-256)
+	cfg.Totp.EncryptionKey = strings.TrimSpace(cfg.Totp.EncryptionKey)
+	if cfg.Totp.EncryptionKey == "" {
+		key, err := generateJWTSecret(32) // Reuse the same random generation function
+		if err != nil {
+			return nil, fmt.Errorf("generate totp encryption key error: %w", err)
+		}
+		cfg.Totp.EncryptionKey = key
+		cfg.Totp.EncryptionKeyConfigured = false
+		log.Println("Warning: TOTP encryption key auto-generated. Consider setting a fixed key for production.")
+	} else {
+		cfg.Totp.EncryptionKeyConfigured = true
+	}
+
 	if err := cfg.Validate(); err != nil {
 		return nil, fmt.Errorf("validate config error: %w", err)
 	}
@@ -695,6 +764,7 @@ func setDefaults() {
 	viper.SetDefault("redis.write_timeout_seconds", 3)
 	viper.SetDefault("redis.pool_size", 128)
 	viper.SetDefault("redis.min_idle_conns", 10)
+	viper.SetDefault("redis.enable_tls", false)
 
 	// Ops (vNext)
 	viper.SetDefault("ops.enabled", true)
@@ -714,6 +784,9 @@ func setDefaults() {
 	viper.SetDefault("jwt.secret", "")
 	viper.SetDefault("jwt.expire_hour", 24)
 
+	// TOTP
+	viper.SetDefault("totp.encryption_key", "")
+
 	// Default
 	// Admin credentials are created via the setup flow (web wizard / CLI / AUTO_SETUP).
 	// Do not ship fixed defaults here to avoid insecure "known credentials" in production.
@@ -764,6 +837,13 @@ func setDefaults() {
 	viper.SetDefault("dashboard_aggregation.retention.daily_days", 730)
 	viper.SetDefault("dashboard_aggregation.recompute_days", 2)
 
+	// Usage cleanup task
+	viper.SetDefault("usage_cleanup.enabled", true)
+	viper.SetDefault("usage_cleanup.max_range_days", 31)
+	viper.SetDefault("usage_cleanup.batch_size", 5000)
+	viper.SetDefault("usage_cleanup.worker_interval_seconds", 10)
+	viper.SetDefault("usage_cleanup.task_timeout_seconds", 1800)
+
 	// Gateway
 	viper.SetDefault("gateway.response_header_timeout", 600) // 600秒(10分钟)等待上游响应头,LLM高负载时可能排队较久
 	viper.SetDefault("gateway.log_upstream_error_body", true)
@@ -802,6 +882,8 @@ func setDefaults() {
 	viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_failures", 3)
 	viper.SetDefault("gateway.scheduling.outbox_backlog_rebuild_rows", 10000)
 	viper.SetDefault("gateway.scheduling.full_rebuild_interval_seconds", 300)
+	// TLS指纹伪装配置(全局开关默认开启;账号级别默认关闭,需要单独启用)
+	viper.SetDefault("gateway.tls_fingerprint.enabled", true)
 
 	viper.SetDefault("concurrency.ping_interval", 10)
 
 	// TokenRefresh
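Between the defaults above and their validation below, a quick sketch of the worker these usage_cleanup knobs drive (editor's illustration, not part of this patch; runCleanupWorker and deleteUsageBatch are hypothetical names — the real executor lives in the service layer):

import (
	"context"
	"time"
)

// runCleanupWorker polls every worker_interval_seconds and deletes usage rows
// in batch_size chunks, bounding each run by task_timeout_seconds.
func runCleanupWorker(ctx context.Context, cfg UsageCleanupConfig, deleteUsageBatch func(context.Context, int) (int, error)) {
	if !cfg.Enabled {
		return
	}
	ticker := time.NewTicker(time.Duration(cfg.WorkerIntervalSeconds) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			taskCtx, cancel := context.WithTimeout(ctx, time.Duration(cfg.TaskTimeoutSeconds)*time.Second)
			for {
				deleted, err := deleteUsageBatch(taskCtx, cfg.BatchSize)
				if err != nil || deleted < cfg.BatchSize {
					break // finished, failed, or timed out; wait for the next tick
				}
			}
			cancel()
		}
	}
}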
@@ -1004,6 +1086,33 @@ func (c *Config) Validate() error {
 			return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative")
 		}
 	}
+	if c.UsageCleanup.Enabled {
+		if c.UsageCleanup.MaxRangeDays <= 0 {
+			return fmt.Errorf("usage_cleanup.max_range_days must be positive")
+		}
+		if c.UsageCleanup.BatchSize <= 0 {
+			return fmt.Errorf("usage_cleanup.batch_size must be positive")
+		}
+		if c.UsageCleanup.WorkerIntervalSeconds <= 0 {
+			return fmt.Errorf("usage_cleanup.worker_interval_seconds must be positive")
+		}
+		if c.UsageCleanup.TaskTimeoutSeconds <= 0 {
+			return fmt.Errorf("usage_cleanup.task_timeout_seconds must be positive")
+		}
+	} else {
+		if c.UsageCleanup.MaxRangeDays < 0 {
+			return fmt.Errorf("usage_cleanup.max_range_days must be non-negative")
+		}
+		if c.UsageCleanup.BatchSize < 0 {
+			return fmt.Errorf("usage_cleanup.batch_size must be non-negative")
+		}
+		if c.UsageCleanup.WorkerIntervalSeconds < 0 {
+			return fmt.Errorf("usage_cleanup.worker_interval_seconds must be non-negative")
+		}
+		if c.UsageCleanup.TaskTimeoutSeconds < 0 {
+			return fmt.Errorf("usage_cleanup.task_timeout_seconds must be non-negative")
+		}
+	}
 	if c.Gateway.MaxBodySize <= 0 {
 		return fmt.Errorf("gateway.max_body_size must be positive")
 	}
diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go
index 4637989e..f734619f 100644
--- a/backend/internal/config/config_test.go
+++ b/backend/internal/config/config_test.go
@@ -280,3 +280,573 @@ func TestValidateDashboardAggregationBackfillMaxDays(t *testing.T) {
 		t.Fatalf("Validate() expected backfill_max_days error, got: %v", err)
 	}
 }
+
+func TestLoadDefaultUsageCleanupConfig(t *testing.T) {
+	viper.Reset()
+
+	cfg, err := Load()
+	if err != nil {
+		t.Fatalf("Load() error: %v", err)
+	}
+
+	if !cfg.UsageCleanup.Enabled {
+		t.Fatalf("UsageCleanup.Enabled = false, want true")
+	}
+	if cfg.UsageCleanup.MaxRangeDays != 31 {
+		t.Fatalf("UsageCleanup.MaxRangeDays = %d, want 31", cfg.UsageCleanup.MaxRangeDays)
+	}
+	if cfg.UsageCleanup.BatchSize != 5000 {
+		t.Fatalf("UsageCleanup.BatchSize = %d, want 5000", cfg.UsageCleanup.BatchSize)
+	}
+	if cfg.UsageCleanup.WorkerIntervalSeconds != 10 {
+		t.Fatalf("UsageCleanup.WorkerIntervalSeconds = %d, want 10", cfg.UsageCleanup.WorkerIntervalSeconds)
+	}
+	if cfg.UsageCleanup.TaskTimeoutSeconds != 1800 {
+		t.Fatalf("UsageCleanup.TaskTimeoutSeconds = %d, want 1800", cfg.UsageCleanup.TaskTimeoutSeconds)
+	}
+}
+
+func TestValidateUsageCleanupConfigEnabled(t *testing.T) {
+	viper.Reset()
+
+	cfg, err := Load()
+	if err != nil {
+		t.Fatalf("Load() error: %v", err)
+	}
+
+	cfg.UsageCleanup.Enabled = true
+	cfg.UsageCleanup.MaxRangeDays = 0
+	err = cfg.Validate()
+	if err == nil {
+		t.Fatalf("Validate() expected error for usage_cleanup.max_range_days, got nil")
+	}
+	if !strings.Contains(err.Error(), "usage_cleanup.max_range_days") {
+		t.Fatalf("Validate() expected max_range_days error, got: %v", err)
+	}
+}
+
+func TestValidateUsageCleanupConfigDisabled(t *testing.T) {
+	viper.Reset()
+
+	cfg, err := Load()
+	if err != nil {
+		t.Fatalf("Load() error: %v", err)
+	}
+
+	cfg.UsageCleanup.Enabled = false
+	cfg.UsageCleanup.BatchSize = -1
+	err = cfg.Validate()
+	if err == nil {
+		t.Fatalf("Validate() expected error for usage_cleanup.batch_size, got nil")
+	}
+	if !strings.Contains(err.Error(), "usage_cleanup.batch_size") {
+		t.Fatalf("Validate() expected batch_size error, got: %v", err)
+	}
+}
+
+func TestConfigAddressHelpers(t *testing.T) {
+	server := ServerConfig{Host: "127.0.0.1", Port: 9000}
+	if server.Address() != "127.0.0.1:9000" {
+		t.Fatalf("ServerConfig.Address() = %q", server.Address())
+	}
+
+	dbCfg := DatabaseConfig{
+		Host:     "localhost",
+		Port:     5432,
+		User:     "postgres",
+		Password: "",
+		DBName:   "sub2api",
+		SSLMode:  "disable",
+	}
+	if strings.Contains(dbCfg.DSN(), "password=") {
+		t.Fatalf("DatabaseConfig.DSN() should not include password when empty")
+	}
+
+	dbCfg.Password = "secret"
+	if !strings.Contains(dbCfg.DSN(), "password=secret") {
+		t.Fatalf("DatabaseConfig.DSN() missing password")
+	}
+
+	dbCfg.Password = ""
+	if strings.Contains(dbCfg.DSNWithTimezone("UTC"), "password=") {
+		t.Fatalf("DatabaseConfig.DSNWithTimezone() should omit password when empty")
+	}
+
+	if !strings.Contains(dbCfg.DSNWithTimezone(""), "TimeZone=Asia/Shanghai") {
+		t.Fatalf("DatabaseConfig.DSNWithTimezone() should use default timezone")
+	}
+	if !strings.Contains(dbCfg.DSNWithTimezone("UTC"), "TimeZone=UTC") {
+		t.Fatalf("DatabaseConfig.DSNWithTimezone() should use provided timezone")
+	}
+
+	redis := RedisConfig{Host: "redis", Port: 6379}
+	if 
redis.Address() != "redis:6379" { + t.Fatalf("RedisConfig.Address() = %q", redis.Address()) + } +} + +func TestNormalizeStringSlice(t *testing.T) { + values := normalizeStringSlice([]string{" a ", "", "b", " ", "c"}) + if len(values) != 3 || values[0] != "a" || values[1] != "b" || values[2] != "c" { + t.Fatalf("normalizeStringSlice() unexpected result: %#v", values) + } + if normalizeStringSlice(nil) != nil { + t.Fatalf("normalizeStringSlice(nil) expected nil slice") + } +} + +func TestGetServerAddressFromEnv(t *testing.T) { + t.Setenv("SERVER_HOST", "127.0.0.1") + t.Setenv("SERVER_PORT", "9090") + + address := GetServerAddress() + if address != "127.0.0.1:9090" { + t.Fatalf("GetServerAddress() = %q", address) + } +} + +func TestValidateAbsoluteHTTPURL(t *testing.T) { + if err := ValidateAbsoluteHTTPURL("https://example.com/path"); err != nil { + t.Fatalf("ValidateAbsoluteHTTPURL valid url error: %v", err) + } + if err := ValidateAbsoluteHTTPURL(""); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject empty url") + } + if err := ValidateAbsoluteHTTPURL("/relative"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject relative url") + } + if err := ValidateAbsoluteHTTPURL("ftp://example.com"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject ftp scheme") + } + if err := ValidateAbsoluteHTTPURL("https://example.com/#frag"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject fragment") + } +} + +func TestValidateFrontendRedirectURL(t *testing.T) { + if err := ValidateFrontendRedirectURL("/auth/callback"); err != nil { + t.Fatalf("ValidateFrontendRedirectURL relative error: %v", err) + } + if err := ValidateFrontendRedirectURL("https://example.com/auth"); err != nil { + t.Fatalf("ValidateFrontendRedirectURL absolute error: %v", err) + } + if err := ValidateFrontendRedirectURL("example.com/path"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject non-absolute url") + } + if err := ValidateFrontendRedirectURL("//evil.com"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject // prefix") + } + if err := ValidateFrontendRedirectURL("javascript:alert(1)"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject javascript scheme") + } +} + +func TestWarnIfInsecureURL(t *testing.T) { + warnIfInsecureURL("test", "http://example.com") + warnIfInsecureURL("test", "bad://url") +} + +func TestGenerateJWTSecretDefaultLength(t *testing.T) { + secret, err := generateJWTSecret(0) + if err != nil { + t.Fatalf("generateJWTSecret error: %v", err) + } + if len(secret) == 0 { + t.Fatalf("generateJWTSecret returned empty string") + } +} + +func TestValidateOpsCleanupScheduleRequired(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + cfg.Ops.Cleanup.Enabled = true + cfg.Ops.Cleanup.Schedule = "" + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for ops.cleanup.schedule") + } + if !strings.Contains(err.Error(), "ops.cleanup.schedule") { + t.Fatalf("Validate() expected ops.cleanup.schedule error, got: %v", err) + } +} + +func TestValidateConcurrencyPingInterval(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + cfg.Concurrency.PingInterval = 3 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for concurrency.ping_interval") + } + if !strings.Contains(err.Error(), "concurrency.ping_interval") { + t.Fatalf("Validate() expected 
concurrency.ping_interval error, got: %v", err) + } +} + +func TestProvideConfig(t *testing.T) { + viper.Reset() + if _, err := ProvideConfig(); err != nil { + t.Fatalf("ProvideConfig() error: %v", err) + } +} + +func TestValidateConfigWithLinuxDoEnabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Security.CSP.Enabled = true + cfg.Security.CSP.Policy = "default-src 'self'" + + cfg.LinuxDo.Enabled = true + cfg.LinuxDo.ClientID = "client" + cfg.LinuxDo.ClientSecret = "secret" + cfg.LinuxDo.AuthorizeURL = "https://example.com/oauth2/authorize" + cfg.LinuxDo.TokenURL = "https://example.com/oauth2/token" + cfg.LinuxDo.UserInfoURL = "https://example.com/oauth2/userinfo" + cfg.LinuxDo.RedirectURL = "https://example.com/api/v1/auth/oauth/linuxdo/callback" + cfg.LinuxDo.FrontendRedirectURL = "/auth/linuxdo/callback" + cfg.LinuxDo.TokenAuthMethod = "client_secret_post" + + if err := cfg.Validate(); err != nil { + t.Fatalf("Validate() unexpected error: %v", err) + } +} + +func TestValidateJWTSecretStrength(t *testing.T) { + if !isWeakJWTSecret("change-me-in-production") { + t.Fatalf("isWeakJWTSecret should detect weak secret") + } + if isWeakJWTSecret("StrongSecretValue") { + t.Fatalf("isWeakJWTSecret should accept strong secret") + } +} + +func TestGenerateJWTSecretWithLength(t *testing.T) { + secret, err := generateJWTSecret(16) + if err != nil { + t.Fatalf("generateJWTSecret error: %v", err) + } + if len(secret) == 0 { + t.Fatalf("generateJWTSecret returned empty string") + } +} + +func TestValidateAbsoluteHTTPURLMissingHost(t *testing.T) { + if err := ValidateAbsoluteHTTPURL("https://"); err == nil { + t.Fatalf("ValidateAbsoluteHTTPURL should reject missing host") + } +} + +func TestValidateFrontendRedirectURLInvalidChars(t *testing.T) { + if err := ValidateFrontendRedirectURL("/auth/\ncallback"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject invalid chars") + } + if err := ValidateFrontendRedirectURL("http://"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject missing host") + } + if err := ValidateFrontendRedirectURL("mailto:user@example.com"); err == nil { + t.Fatalf("ValidateFrontendRedirectURL should reject mailto") + } +} + +func TestWarnIfInsecureURLHTTPS(t *testing.T) { + warnIfInsecureURL("secure", "https://example.com") +} + +func TestValidateConfigErrors(t *testing.T) { + buildValid := func(t *testing.T) *Config { + t.Helper() + viper.Reset() + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + return cfg + } + + cases := []struct { + name string + mutate func(*Config) + wantErr string + }{ + { + name: "jwt expire hour positive", + mutate: func(c *Config) { c.JWT.ExpireHour = 0 }, + wantErr: "jwt.expire_hour must be positive", + }, + { + name: "jwt expire hour max", + mutate: func(c *Config) { c.JWT.ExpireHour = 200 }, + wantErr: "jwt.expire_hour must be <= 168", + }, + { + name: "csp policy required", + mutate: func(c *Config) { c.Security.CSP.Enabled = true; c.Security.CSP.Policy = "" }, + wantErr: "security.csp.policy", + }, + { + name: "linuxdo client id required", + mutate: func(c *Config) { + c.LinuxDo.Enabled = true + c.LinuxDo.ClientID = "" + }, + wantErr: "linuxdo_connect.client_id", + }, + { + name: "linuxdo token auth method", + mutate: func(c *Config) { + c.LinuxDo.Enabled = true + c.LinuxDo.ClientID = "client" + c.LinuxDo.ClientSecret = "secret" + c.LinuxDo.AuthorizeURL = "https://example.com/authorize" + c.LinuxDo.TokenURL = 
"https://example.com/token" + c.LinuxDo.UserInfoURL = "https://example.com/userinfo" + c.LinuxDo.RedirectURL = "https://example.com/callback" + c.LinuxDo.FrontendRedirectURL = "/auth/callback" + c.LinuxDo.TokenAuthMethod = "invalid" + }, + wantErr: "linuxdo_connect.token_auth_method", + }, + { + name: "billing circuit breaker threshold", + mutate: func(c *Config) { c.Billing.CircuitBreaker.FailureThreshold = 0 }, + wantErr: "billing.circuit_breaker.failure_threshold", + }, + { + name: "billing circuit breaker reset", + mutate: func(c *Config) { c.Billing.CircuitBreaker.ResetTimeoutSeconds = 0 }, + wantErr: "billing.circuit_breaker.reset_timeout_seconds", + }, + { + name: "billing circuit breaker half open", + mutate: func(c *Config) { c.Billing.CircuitBreaker.HalfOpenRequests = 0 }, + wantErr: "billing.circuit_breaker.half_open_requests", + }, + { + name: "database max open conns", + mutate: func(c *Config) { c.Database.MaxOpenConns = 0 }, + wantErr: "database.max_open_conns", + }, + { + name: "database max lifetime", + mutate: func(c *Config) { c.Database.ConnMaxLifetimeMinutes = -1 }, + wantErr: "database.conn_max_lifetime_minutes", + }, + { + name: "database idle exceeds open", + mutate: func(c *Config) { c.Database.MaxIdleConns = c.Database.MaxOpenConns + 1 }, + wantErr: "database.max_idle_conns cannot exceed", + }, + { + name: "redis dial timeout", + mutate: func(c *Config) { c.Redis.DialTimeoutSeconds = 0 }, + wantErr: "redis.dial_timeout_seconds", + }, + { + name: "redis read timeout", + mutate: func(c *Config) { c.Redis.ReadTimeoutSeconds = 0 }, + wantErr: "redis.read_timeout_seconds", + }, + { + name: "redis write timeout", + mutate: func(c *Config) { c.Redis.WriteTimeoutSeconds = 0 }, + wantErr: "redis.write_timeout_seconds", + }, + { + name: "redis pool size", + mutate: func(c *Config) { c.Redis.PoolSize = 0 }, + wantErr: "redis.pool_size", + }, + { + name: "redis idle exceeds pool", + mutate: func(c *Config) { c.Redis.MinIdleConns = c.Redis.PoolSize + 1 }, + wantErr: "redis.min_idle_conns cannot exceed", + }, + { + name: "dashboard cache disabled negative", + mutate: func(c *Config) { c.Dashboard.Enabled = false; c.Dashboard.StatsTTLSeconds = -1 }, + wantErr: "dashboard_cache.stats_ttl_seconds", + }, + { + name: "dashboard cache fresh ttl positive", + mutate: func(c *Config) { c.Dashboard.Enabled = true; c.Dashboard.StatsFreshTTLSeconds = 0 }, + wantErr: "dashboard_cache.stats_fresh_ttl_seconds", + }, + { + name: "dashboard aggregation enabled interval", + mutate: func(c *Config) { c.DashboardAgg.Enabled = true; c.DashboardAgg.IntervalSeconds = 0 }, + wantErr: "dashboard_aggregation.interval_seconds", + }, + { + name: "dashboard aggregation backfill positive", + mutate: func(c *Config) { + c.DashboardAgg.Enabled = true + c.DashboardAgg.BackfillEnabled = true + c.DashboardAgg.BackfillMaxDays = 0 + }, + wantErr: "dashboard_aggregation.backfill_max_days", + }, + { + name: "dashboard aggregation retention", + mutate: func(c *Config) { c.DashboardAgg.Enabled = true; c.DashboardAgg.Retention.UsageLogsDays = 0 }, + wantErr: "dashboard_aggregation.retention.usage_logs_days", + }, + { + name: "dashboard aggregation disabled interval", + mutate: func(c *Config) { c.DashboardAgg.Enabled = false; c.DashboardAgg.IntervalSeconds = -1 }, + wantErr: "dashboard_aggregation.interval_seconds", + }, + { + name: "usage cleanup max range", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.MaxRangeDays = 0 }, + wantErr: "usage_cleanup.max_range_days", + }, + { + name: "usage 
cleanup worker interval", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.WorkerIntervalSeconds = 0 }, + wantErr: "usage_cleanup.worker_interval_seconds", + }, + { + name: "usage cleanup batch size", + mutate: func(c *Config) { c.UsageCleanup.Enabled = true; c.UsageCleanup.BatchSize = 0 }, + wantErr: "usage_cleanup.batch_size", + }, + { + name: "usage cleanup disabled negative", + mutate: func(c *Config) { c.UsageCleanup.Enabled = false; c.UsageCleanup.BatchSize = -1 }, + wantErr: "usage_cleanup.batch_size", + }, + { + name: "gateway max body size", + mutate: func(c *Config) { c.Gateway.MaxBodySize = 0 }, + wantErr: "gateway.max_body_size", + }, + { + name: "gateway max idle conns", + mutate: func(c *Config) { c.Gateway.MaxIdleConns = 0 }, + wantErr: "gateway.max_idle_conns", + }, + { + name: "gateway max idle conns per host", + mutate: func(c *Config) { c.Gateway.MaxIdleConnsPerHost = 0 }, + wantErr: "gateway.max_idle_conns_per_host", + }, + { + name: "gateway idle timeout", + mutate: func(c *Config) { c.Gateway.IdleConnTimeoutSeconds = 0 }, + wantErr: "gateway.idle_conn_timeout_seconds", + }, + { + name: "gateway max upstream clients", + mutate: func(c *Config) { c.Gateway.MaxUpstreamClients = 0 }, + wantErr: "gateway.max_upstream_clients", + }, + { + name: "gateway client idle ttl", + mutate: func(c *Config) { c.Gateway.ClientIdleTTLSeconds = 0 }, + wantErr: "gateway.client_idle_ttl_seconds", + }, + { + name: "gateway concurrency slot ttl", + mutate: func(c *Config) { c.Gateway.ConcurrencySlotTTLMinutes = 0 }, + wantErr: "gateway.concurrency_slot_ttl_minutes", + }, + { + name: "gateway max conns per host", + mutate: func(c *Config) { c.Gateway.MaxConnsPerHost = -1 }, + wantErr: "gateway.max_conns_per_host", + }, + { + name: "gateway connection isolation", + mutate: func(c *Config) { c.Gateway.ConnectionPoolIsolation = "invalid" }, + wantErr: "gateway.connection_pool_isolation", + }, + { + name: "gateway stream keepalive range", + mutate: func(c *Config) { c.Gateway.StreamKeepaliveInterval = 4 }, + wantErr: "gateway.stream_keepalive_interval", + }, + { + name: "gateway stream data interval range", + mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = 5 }, + wantErr: "gateway.stream_data_interval_timeout", + }, + { + name: "gateway stream data interval negative", + mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = -1 }, + wantErr: "gateway.stream_data_interval_timeout must be non-negative", + }, + { + name: "gateway max line size", + mutate: func(c *Config) { c.Gateway.MaxLineSize = 1024 }, + wantErr: "gateway.max_line_size must be at least", + }, + { + name: "gateway max line size negative", + mutate: func(c *Config) { c.Gateway.MaxLineSize = -1 }, + wantErr: "gateway.max_line_size must be non-negative", + }, + { + name: "gateway scheduling sticky waiting", + mutate: func(c *Config) { c.Gateway.Scheduling.StickySessionMaxWaiting = 0 }, + wantErr: "gateway.scheduling.sticky_session_max_waiting", + }, + { + name: "gateway scheduling outbox poll", + mutate: func(c *Config) { c.Gateway.Scheduling.OutboxPollIntervalSeconds = 0 }, + wantErr: "gateway.scheduling.outbox_poll_interval_seconds", + }, + { + name: "gateway scheduling outbox failures", + mutate: func(c *Config) { c.Gateway.Scheduling.OutboxLagRebuildFailures = 0 }, + wantErr: "gateway.scheduling.outbox_lag_rebuild_failures", + }, + { + name: "gateway outbox lag rebuild", + mutate: func(c *Config) { + c.Gateway.Scheduling.OutboxLagWarnSeconds = 10 + 
c.Gateway.Scheduling.OutboxLagRebuildSeconds = 5 + }, + wantErr: "gateway.scheduling.outbox_lag_rebuild_seconds", + }, + { + name: "ops metrics collector ttl", + mutate: func(c *Config) { c.Ops.MetricsCollectorCache.TTL = -1 }, + wantErr: "ops.metrics_collector_cache.ttl", + }, + { + name: "ops cleanup retention", + mutate: func(c *Config) { c.Ops.Cleanup.ErrorLogRetentionDays = -1 }, + wantErr: "ops.cleanup.error_log_retention_days", + }, + { + name: "ops cleanup minute retention", + mutate: func(c *Config) { c.Ops.Cleanup.MinuteMetricsRetentionDays = -1 }, + wantErr: "ops.cleanup.minute_metrics_retention_days", + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + cfg := buildValid(t) + tt.mutate(cfg) + err := cfg.Validate() + if err == nil || !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("Validate() error = %v, want %q", err, tt.wantErr) + } + }) + } +} diff --git a/backend/internal/domain/announcement.go b/backend/internal/domain/announcement.go new file mode 100644 index 00000000..7dc9a9cc --- /dev/null +++ b/backend/internal/domain/announcement.go @@ -0,0 +1,226 @@ +package domain + +import ( + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +const ( + AnnouncementStatusDraft = "draft" + AnnouncementStatusActive = "active" + AnnouncementStatusArchived = "archived" +) + +const ( + AnnouncementConditionTypeSubscription = "subscription" + AnnouncementConditionTypeBalance = "balance" +) + +const ( + AnnouncementOperatorIn = "in" + AnnouncementOperatorGT = "gt" + AnnouncementOperatorGTE = "gte" + AnnouncementOperatorLT = "lt" + AnnouncementOperatorLTE = "lte" + AnnouncementOperatorEQ = "eq" +) + +var ( + ErrAnnouncementNotFound = infraerrors.NotFound("ANNOUNCEMENT_NOT_FOUND", "announcement not found") + ErrAnnouncementInvalidTarget = infraerrors.BadRequest("ANNOUNCEMENT_INVALID_TARGET", "invalid announcement targeting rules") +) + +type AnnouncementTargeting struct { + // AnyOf 表示 OR:任意一个条件组满足即可展示。 + AnyOf []AnnouncementConditionGroup `json:"any_of,omitempty"` +} + +type AnnouncementConditionGroup struct { + // AllOf 表示 AND:组内所有条件都满足才算命中该组。 + AllOf []AnnouncementCondition `json:"all_of,omitempty"` +} + +type AnnouncementCondition struct { + // Type: subscription | balance + Type string `json:"type"` + + // Operator: + // - subscription: in + // - balance: gt/gte/lt/lte/eq + Operator string `json:"operator"` + + // subscription 条件:匹配的订阅套餐(group_id) + GroupIDs []int64 `json:"group_ids,omitempty"` + + // balance 条件:比较阈值 + Value float64 `json:"value,omitempty"` +} + +func (t AnnouncementTargeting) Matches(balance float64, activeSubscriptionGroupIDs map[int64]struct{}) bool { + // 空规则:展示给所有用户 + if len(t.AnyOf) == 0 { + return true + } + + for _, group := range t.AnyOf { + if len(group.AllOf) == 0 { + // 空条件组不命中(避免 OR 中出现无条件 “全命中”) + continue + } + allMatched := true + for _, cond := range group.AllOf { + if !cond.Matches(balance, activeSubscriptionGroupIDs) { + allMatched = false + break + } + } + if allMatched { + return true + } + } + + return false +} + +func (c AnnouncementCondition) Matches(balance float64, activeSubscriptionGroupIDs map[int64]struct{}) bool { + switch c.Type { + case AnnouncementConditionTypeSubscription: + if c.Operator != AnnouncementOperatorIn { + return false + } + if len(c.GroupIDs) == 0 { + return false + } + if len(activeSubscriptionGroupIDs) == 0 { + return false + } + for _, gid := range c.GroupIDs { + if _, ok := activeSubscriptionGroupIDs[gid]; ok { + return true + } + } + return 
false
+
+	case AnnouncementConditionTypeBalance:
+		switch c.Operator {
+		case AnnouncementOperatorGT:
+			return balance > c.Value
+		case AnnouncementOperatorGTE:
+			return balance >= c.Value
+		case AnnouncementOperatorLT:
+			return balance < c.Value
+		case AnnouncementOperatorLTE:
+			return balance <= c.Value
+		case AnnouncementOperatorEQ:
+			// 注意:eq 是 float64 的精确相等比较,只适合整数额度等可精确表示的值
+			return balance == c.Value
+		default:
+			return false
+		}
+
+	default:
+		return false
+	}
+}
+
+func (t AnnouncementTargeting) NormalizeAndValidate() (AnnouncementTargeting, error) {
+	normalized := AnnouncementTargeting{AnyOf: make([]AnnouncementConditionGroup, 0, len(t.AnyOf))}
+
+	// 允许空 targeting(展示给所有用户)
+	if len(t.AnyOf) == 0 {
+		return normalized, nil
+	}
+
+	if len(t.AnyOf) > 50 {
+		return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget
+	}
+
+	for _, g := range t.AnyOf {
+		if len(g.AllOf) == 0 {
+			return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget
+		}
+		if len(g.AllOf) > 50 {
+			return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget
+		}
+
+		group := AnnouncementConditionGroup{AllOf: make([]AnnouncementCondition, 0, len(g.AllOf))}
+		for _, c := range g.AllOf {
+			cond := AnnouncementCondition{
+				Type:     strings.TrimSpace(c.Type),
+				Operator: strings.TrimSpace(c.Operator),
+				Value:    c.Value,
+			}
+			for _, gid := range c.GroupIDs {
+				if gid <= 0 {
+					return AnnouncementTargeting{}, ErrAnnouncementInvalidTarget
+				}
+				cond.GroupIDs = append(cond.GroupIDs, gid)
+			}
+
+			if err := cond.validate(); err != nil {
+				return AnnouncementTargeting{}, err
+			}
+			group.AllOf = append(group.AllOf, cond)
+		}
+
+		normalized.AnyOf = append(normalized.AnyOf, group)
+	}
+
+	return normalized, nil
+}
+
+func (c AnnouncementCondition) validate() error {
+	switch c.Type {
+	case AnnouncementConditionTypeSubscription:
+		if c.Operator != AnnouncementOperatorIn {
+			return ErrAnnouncementInvalidTarget
+		}
+		if len(c.GroupIDs) == 0 {
+			return ErrAnnouncementInvalidTarget
+		}
+		return nil
+
+	case AnnouncementConditionTypeBalance:
+		switch c.Operator {
+		case AnnouncementOperatorGT, AnnouncementOperatorGTE, AnnouncementOperatorLT, AnnouncementOperatorLTE, AnnouncementOperatorEQ:
+			return nil
+		default:
+			return ErrAnnouncementInvalidTarget
+		}
+
+	default:
+		return ErrAnnouncementInvalidTarget
+	}
+}
+
+type Announcement struct {
+	ID        int64
+	Title     string
+	Content   string
+	Status    string
+	Targeting AnnouncementTargeting
+	StartsAt  *time.Time
+	EndsAt    *time.Time
+	CreatedBy *int64
+	UpdatedBy *int64
+	CreatedAt time.Time
+	UpdatedAt time.Time
+}
+
+func (a *Announcement) IsActiveAt(now time.Time) bool {
+	if a == nil {
+		return false
+	}
+	if a.Status != AnnouncementStatusActive {
+		return false
+	}
+	if a.StartsAt != nil && now.Before(*a.StartsAt) {
+		return false
+	}
+	if a.EndsAt != nil && !now.Before(*a.EndsAt) {
+		// ends_at 语义:到点即下线
+		return false
+	}
+	return true
+}
diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go
new file mode 100644
index 00000000..4ecea9d8
--- /dev/null
+++ b/backend/internal/domain/constants.go
@@ -0,0 +1,64 @@
+package domain
+
+// Status constants
+const (
+	StatusActive   = "active"
+	StatusDisabled = "disabled"
+	StatusError    = "error"
+	StatusUnused   = "unused"
+	StatusUsed     = "used"
+	StatusExpired  = "expired"
+)
+
+// Role constants
+const (
+	RoleAdmin = "admin"
+	RoleUser  = "user"
+)
+
+// Platform constants
+const (
+	PlatformAnthropic   = "anthropic"
+	PlatformOpenAI      = "openai"
+	PlatformGemini      = "gemini"
+	PlatformAntigravity = "antigravity"
+)
+
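To make the AnyOf/AllOf semantics of the targeting rules above concrete — OR across groups, AND within a group — here is a small worked example (editor's sketch, not part of this patch; it assumes a test file inside package domain with fmt imported):

func ExampleAnnouncementTargeting_Matches() {
	t := AnnouncementTargeting{
		AnyOf: []AnnouncementConditionGroup{
			// Branch 1: user holds an active subscription in group 1 or 2.
			{AllOf: []AnnouncementCondition{{
				Type:     AnnouncementConditionTypeSubscription,
				Operator: AnnouncementOperatorIn,
				GroupIDs: []int64{1, 2},
			}}},
			// Branch 2: balance has dropped below 0.5.
			{AllOf: []AnnouncementCondition{{
				Type:     AnnouncementConditionTypeBalance,
				Operator: AnnouncementOperatorLT,
				Value:    0.5,
			}}},
		},
	}
	subs := map[int64]struct{}{2: {}}
	fmt.Println(t.Matches(10.0, subs)) // subscription branch matches
	fmt.Println(t.Matches(10.0, nil))  // no subscription, and balance is not below 0.5
	// Output:
	// true
	// false
}

+// Account type constants
+const (
+	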
AccountTypeOAuth = "oauth" // OAuth类型账号(full scope: profile + inference) + AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope) + AccountTypeAPIKey = "apikey" // API Key类型账号 +) + +// Redeem type constants +const ( + RedeemTypeBalance = "balance" + RedeemTypeConcurrency = "concurrency" + RedeemTypeSubscription = "subscription" +) + +// PromoCode status constants +const ( + PromoCodeStatusActive = "active" + PromoCodeStatusDisabled = "disabled" +) + +// Admin adjustment type constants +const ( + AdjustmentTypeAdminBalance = "admin_balance" // 管理员调整余额 + AdjustmentTypeAdminConcurrency = "admin_concurrency" // 管理员调整并发数 +) + +// Group subscription type constants +const ( + SubscriptionTypeStandard = "standard" // 标准计费模式(按余额扣费) + SubscriptionTypeSubscription = "subscription" // 订阅模式(按限额控制) +) + +// Subscription status constants +const ( + SubscriptionStatusActive = "active" + SubscriptionStatusExpired = "expired" + SubscriptionStatusSuspended = "suspended" +) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 579ada14..bbf5d026 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -45,6 +45,7 @@ type AccountHandler struct { concurrencyService *service.ConcurrencyService crsSyncService *service.CRSSyncService sessionLimitCache service.SessionLimitCache + tokenCacheInvalidator service.TokenCacheInvalidator } // NewAccountHandler creates a new admin account handler @@ -60,6 +61,7 @@ func NewAccountHandler( concurrencyService *service.ConcurrencyService, crsSyncService *service.CRSSyncService, sessionLimitCache service.SessionLimitCache, + tokenCacheInvalidator service.TokenCacheInvalidator, ) *AccountHandler { return &AccountHandler{ adminService: adminService, @@ -73,6 +75,7 @@ func NewAccountHandler( concurrencyService: concurrencyService, crsSyncService: crsSyncService, sessionLimitCache: sessionLimitCache, + tokenCacheInvalidator: tokenCacheInvalidator, } } @@ -129,13 +132,6 @@ type BulkUpdateAccountsRequest struct { ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // 用户确认混合渠道风险 } -// AccountLookupRequest 用于凭证身份信息查找账号 -type AccountLookupRequest struct { - Platform string `json:"platform" binding:"required"` - Emails []string `json:"emails" binding:"required,min=1"` - IdentityType string `json:"identity_type"` -} - // AccountWithConcurrency extends Account with real-time concurrency info type AccountWithConcurrency struct { *dto.Account @@ -180,6 +176,7 @@ func (h *AccountHandler) List(c *gin.Context) { // 识别需要查询窗口费用和会话数的账号(Anthropic OAuth/SetupToken 且启用了相应功能) windowCostAccountIDs := make([]int64, 0) sessionLimitAccountIDs := make([]int64, 0) + sessionIdleTimeouts := make(map[int64]time.Duration) // 各账号的会话空闲超时配置 for i := range accounts { acc := &accounts[i] if acc.IsAnthropicOAuthOrSetupToken() { @@ -188,6 +185,7 @@ func (h *AccountHandler) List(c *gin.Context) { } if acc.GetMaxSessions() > 0 { sessionLimitAccountIDs = append(sessionLimitAccountIDs, acc.ID) + sessionIdleTimeouts[acc.ID] = time.Duration(acc.GetSessionIdleTimeoutMinutes()) * time.Minute } } } @@ -196,9 +194,9 @@ func (h *AccountHandler) List(c *gin.Context) { var windowCosts map[int64]float64 var activeSessions map[int64]int - // 获取活跃会话数(批量查询) + // 获取活跃会话数(批量查询,传入各账号的 idleTimeout 配置) if len(sessionLimitAccountIDs) > 0 && h.sessionLimitCache != nil { - activeSessions, _ = h.sessionLimitCache.GetActiveSessionCountBatch(c.Request.Context(), 
sessionLimitAccountIDs) + activeSessions, _ = h.sessionLimitCache.GetActiveSessionCountBatch(c.Request.Context(), sessionLimitAccountIDs, sessionIdleTimeouts) if activeSessions == nil { activeSessions = make(map[int64]int) } @@ -218,12 +216,8 @@ func (h *AccountHandler) List(c *gin.Context) { } accCopy := acc // 闭包捕获 g.Go(func() error { - var startTime time.Time - if accCopy.SessionWindowStart != nil { - startTime = *accCopy.SessionWindowStart - } else { - startTime = time.Now().Add(-5 * time.Hour) - } + // 使用统一的窗口开始时间计算逻辑(考虑窗口过期情况) + startTime := accCopy.GetCurrentWindowStartTime() stats, err := h.accountUsageService.GetAccountWindowStats(gctx, accCopy.ID, startTime) if err == nil && stats != nil { mu.Lock() @@ -265,87 +259,6 @@ func (h *AccountHandler) List(c *gin.Context) { response.Paginated(c, result, total, page, pageSize) } -// Lookup 根据凭证身份信息查找账号 -// POST /api/v1/admin/accounts/lookup -func (h *AccountHandler) Lookup(c *gin.Context) { - var req AccountLookupRequest - if err := c.ShouldBindJSON(&req); err != nil { - response.BadRequest(c, "Invalid request: "+err.Error()) - return - } - - identityType := strings.TrimSpace(req.IdentityType) - if identityType == "" { - identityType = "credential_email" - } - if identityType != "credential_email" { - response.BadRequest(c, "Unsupported identity_type") - return - } - - platform := strings.TrimSpace(req.Platform) - if platform == "" { - response.BadRequest(c, "Platform is required") - return - } - - normalized := make([]string, 0, len(req.Emails)) - seen := make(map[string]struct{}) - for _, email := range req.Emails { - cleaned := strings.ToLower(strings.TrimSpace(email)) - if cleaned == "" { - continue - } - if _, ok := seen[cleaned]; ok { - continue - } - seen[cleaned] = struct{}{} - normalized = append(normalized, cleaned) - } - if len(normalized) == 0 { - response.BadRequest(c, "Emails is required") - return - } - - accounts, err := h.adminService.LookupAccountsByCredentialEmail(c.Request.Context(), platform, normalized) - if err != nil { - response.ErrorFrom(c, err) - return - } - - matchedMap := make(map[string]service.Account) - for _, account := range accounts { - email := strings.ToLower(strings.TrimSpace(account.GetCredential("email"))) - if email == "" { - continue - } - if _, ok := matchedMap[email]; ok { - continue - } - matchedMap[email] = account - } - - matched := make([]gin.H, 0, len(matchedMap)) - missing := make([]string, 0) - for _, email := range normalized { - if account, ok := matchedMap[email]; ok { - matched = append(matched, gin.H{ - "email": email, - "account_id": account.ID, - "platform": account.Platform, - "name": account.Name, - }) - continue - } - missing = append(missing, email) - } - - response.Success(c, gin.H{ - "matched": matched, - "missing": missing, - }) -} - // GetByID handles getting an account by ID // GET /api/v1/admin/accounts/:id func (h *AccountHandler) GetByID(c *gin.Context) { @@ -634,9 +547,18 @@ func (h *AccountHandler) Refresh(c *gin.Context) { } } - // 如果 project_id 获取失败,先更新凭证,再标记账户为 error + // 特殊处理 project_id:如果新值为空但旧值非空,保留旧值 + // 这确保了即使 LoadCodeAssist 失败,project_id 也不会丢失 + if newProjectID, _ := newCredentials["project_id"].(string); newProjectID == "" { + if oldProjectID := strings.TrimSpace(account.GetCredential("project_id")); oldProjectID != "" { + newCredentials["project_id"] = oldProjectID + } + } + + // 如果 project_id 获取失败,更新凭证但不标记为 error + // LoadCodeAssist 失败可能是临时网络问题,给它机会在下次自动刷新时重试 if tokenInfo.ProjectIDMissing { - // 先更新凭证 + // 先更新凭证(token 本身刷新成功了) _, updateErr := 
h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ Credentials: newCredentials, }) @@ -644,14 +566,10 @@ func (h *AccountHandler) Refresh(c *gin.Context) { response.InternalError(c, "Failed to update credentials: "+updateErr.Error()) return } - // 标记账户为 error - if setErr := h.adminService.SetAccountError(c.Request.Context(), accountID, "missing_project_id: 账户缺少project id,可能无法使用Antigravity"); setErr != nil { - response.InternalError(c, "Failed to set account error: "+setErr.Error()) - return - } + // 不标记为 error,只返回警告信息 response.Success(c, gin.H{ - "message": "Token refreshed but project_id is missing, account marked as error", - "warning": "missing_project_id", + "message": "Token refreshed successfully, but project_id could not be retrieved (will retry automatically)", + "warning": "missing_project_id_temporary", }) return } @@ -698,6 +616,14 @@ func (h *AccountHandler) Refresh(c *gin.Context) { return } + // 刷新成功后,清除 token 缓存,确保下次请求使用新 token + if h.tokenCacheInvalidator != nil { + if invalidateErr := h.tokenCacheInvalidator.InvalidateToken(c.Request.Context(), updatedAccount); invalidateErr != nil { + // 缓存失效失败只记录日志,不影响主流程 + _ = c.Error(invalidateErr) + } + } + response.Success(c, dto.AccountFromService(updatedAccount)) } @@ -747,6 +673,15 @@ func (h *AccountHandler) ClearError(c *gin.Context) { return } + // 清除错误后,同时清除 token 缓存,确保下次请求会获取最新的 token(触发刷新或从 DB 读取) + // 这解决了管理员重置账号状态后,旧的失效 token 仍在缓存中导致立即再次 401 的问题 + if h.tokenCacheInvalidator != nil && account.IsOAuth() { + if invalidateErr := h.tokenCacheInvalidator.InvalidateToken(c.Request.Context(), account); invalidateErr != nil { + // 缓存失效失败只记录日志,不影响主流程 + _ = c.Error(invalidateErr) + } + } + response.Success(c, dto.AccountFromService(account)) } diff --git a/backend/internal/handler/admin/admin_basic_handlers_test.go b/backend/internal/handler/admin/admin_basic_handlers_test.go new file mode 100644 index 00000000..e0f731e1 --- /dev/null +++ b/backend/internal/handler/admin/admin_basic_handlers_test.go @@ -0,0 +1,262 @@ +package admin + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func setupAdminRouter() (*gin.Engine, *stubAdminService) { + gin.SetMode(gin.TestMode) + router := gin.New() + adminSvc := newStubAdminService() + + userHandler := NewUserHandler(adminSvc) + groupHandler := NewGroupHandler(adminSvc) + proxyHandler := NewProxyHandler(adminSvc) + redeemHandler := NewRedeemHandler(adminSvc) + + router.GET("/api/v1/admin/users", userHandler.List) + router.GET("/api/v1/admin/users/:id", userHandler.GetByID) + router.POST("/api/v1/admin/users", userHandler.Create) + router.PUT("/api/v1/admin/users/:id", userHandler.Update) + router.DELETE("/api/v1/admin/users/:id", userHandler.Delete) + router.POST("/api/v1/admin/users/:id/balance", userHandler.UpdateBalance) + router.GET("/api/v1/admin/users/:id/api-keys", userHandler.GetUserAPIKeys) + router.GET("/api/v1/admin/users/:id/usage", userHandler.GetUserUsage) + + router.GET("/api/v1/admin/groups", groupHandler.List) + router.GET("/api/v1/admin/groups/all", groupHandler.GetAll) + router.GET("/api/v1/admin/groups/:id", groupHandler.GetByID) + router.POST("/api/v1/admin/groups", groupHandler.Create) + router.PUT("/api/v1/admin/groups/:id", groupHandler.Update) + router.DELETE("/api/v1/admin/groups/:id", groupHandler.Delete) + router.GET("/api/v1/admin/groups/:id/stats", groupHandler.GetStats) + 
router.GET("/api/v1/admin/groups/:id/api-keys", groupHandler.GetGroupAPIKeys) + + router.GET("/api/v1/admin/proxies", proxyHandler.List) + router.GET("/api/v1/admin/proxies/all", proxyHandler.GetAll) + router.GET("/api/v1/admin/proxies/:id", proxyHandler.GetByID) + router.POST("/api/v1/admin/proxies", proxyHandler.Create) + router.PUT("/api/v1/admin/proxies/:id", proxyHandler.Update) + router.DELETE("/api/v1/admin/proxies/:id", proxyHandler.Delete) + router.POST("/api/v1/admin/proxies/batch-delete", proxyHandler.BatchDelete) + router.POST("/api/v1/admin/proxies/:id/test", proxyHandler.Test) + router.GET("/api/v1/admin/proxies/:id/stats", proxyHandler.GetStats) + router.GET("/api/v1/admin/proxies/:id/accounts", proxyHandler.GetProxyAccounts) + + router.GET("/api/v1/admin/redeem-codes", redeemHandler.List) + router.GET("/api/v1/admin/redeem-codes/:id", redeemHandler.GetByID) + router.POST("/api/v1/admin/redeem-codes", redeemHandler.Generate) + router.DELETE("/api/v1/admin/redeem-codes/:id", redeemHandler.Delete) + router.POST("/api/v1/admin/redeem-codes/batch-delete", redeemHandler.BatchDelete) + router.POST("/api/v1/admin/redeem-codes/:id/expire", redeemHandler.Expire) + router.GET("/api/v1/admin/redeem-codes/:id/stats", redeemHandler.GetStats) + + return router, adminSvc +} + +func TestUserHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/users?page=1&page_size=20", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + createBody := map[string]any{"email": "new@example.com", "password": "pass123", "balance": 1, "concurrency": 2} + body, _ := json.Marshal(createBody) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/users", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + updateBody := map[string]any{"email": "updated@example.com"} + body, _ = json.Marshal(updateBody) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/users/1", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/users/1", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/users/1/balance", bytes.NewBufferString(`{"balance":1,"operation":"add"}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1/api-keys", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/users/1/usage?period=today", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestGroupHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, 
"/api/v1/admin/groups", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/all", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"name": "new", "platform": "anthropic", "subscription_type": "standard"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/groups", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ = json.Marshal(map[string]any{"name": "update"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/groups/2", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/groups/2", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/groups/2/api-keys", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestProxyHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/all", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"name": "proxy", "protocol": "http", "host": "localhost", "port": 8080}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ = json.Marshal(map[string]any{"name": "proxy2"}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPut, "/api/v1/admin/proxies/4", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/proxies/4", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies/batch-delete", bytes.NewBufferString(`{"ids":[1,2]}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = 
httptest.NewRequest(http.MethodPost, "/api/v1/admin/proxies/4/test", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/4/accounts", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestRedeemHandlerEndpoints(t *testing.T) { + router, _ := setupAdminRouter() + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes/5", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + body, _ := json.Marshal(map[string]any{"count": 1, "type": "balance", "value": 10}) + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodDelete, "/api/v1/admin/redeem-codes/5", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes/batch-delete", bytes.NewBufferString(`{"ids":[1,2]}`)) + req.Header.Set("Content-Type", "application/json") + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodPost, "/api/v1/admin/redeem-codes/5/expire", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + req = httptest.NewRequest(http.MethodGet, "/api/v1/admin/redeem-codes/5/stats", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) +} diff --git a/backend/internal/handler/admin/admin_helpers_test.go b/backend/internal/handler/admin/admin_helpers_test.go new file mode 100644 index 00000000..863c755c --- /dev/null +++ b/backend/internal/handler/admin/admin_helpers_test.go @@ -0,0 +1,134 @@ +package admin + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/netip" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestParseTimeRange(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + req := httptest.NewRequest(http.MethodGet, "/?start_date=2024-01-01&end_date=2024-01-02&timezone=UTC", nil) + c.Request = req + + start, end := parseTimeRange(c) + require.Equal(t, time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), start) + require.Equal(t, time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC), end) + + req = httptest.NewRequest(http.MethodGet, "/?start_date=bad&timezone=UTC", nil) + c.Request = req + start, end = parseTimeRange(c) + require.False(t, start.IsZero()) + require.False(t, end.IsZero()) +} + +func TestParseOpsViewParam(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/?view=excluded", nil) 
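An aside on the contract TestParseTimeRange pins down: parseTimeRange parses start_date/end_date in the requested timezone, treats the end date as inclusive (the returned end is the following midnight), and falls back to a non-zero default window when a date fails to parse. A minimal sketch under those assumptions — the function name, the 7-day default window, and the body are illustrative, not the handler's actual implementation:

package admin

import (
	"time"

	"github.com/gin-gonic/gin"
)

// parseTimeRangeSketch mirrors the behavior asserted above: dates parse in the
// requested timezone, end_date is inclusive (extended to the next midnight),
// and bad input falls back to a default window (7 days is an assumption).
func parseTimeRangeSketch(c *gin.Context) (time.Time, time.Time) {
	loc, err := time.LoadLocation(c.DefaultQuery("timezone", "UTC"))
	if err != nil {
		loc = time.UTC
	}
	now := time.Now().In(loc)
	start, end := now.AddDate(0, 0, -7), now
	if s, perr := time.ParseInLocation("2006-01-02", c.Query("start_date"), loc); perr == nil {
		start = s
	}
	if e, perr := time.ParseInLocation("2006-01-02", c.Query("end_date"), loc); perr == nil {
		end = e.AddDate(0, 0, 1) // inclusive end date -> next midnight
	}
	return start, end
}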
+ require.Equal(t, opsListViewExcluded, parseOpsViewParam(c)) + + c2, _ := gin.CreateTestContext(w) + c2.Request = httptest.NewRequest(http.MethodGet, "/?view=all", nil) + require.Equal(t, opsListViewAll, parseOpsViewParam(c2)) + + c3, _ := gin.CreateTestContext(w) + c3.Request = httptest.NewRequest(http.MethodGet, "/?view=unknown", nil) + require.Equal(t, opsListViewErrors, parseOpsViewParam(c3)) + + require.Equal(t, "", parseOpsViewParam(nil)) +} + +func TestParseOpsDuration(t *testing.T) { + dur, ok := parseOpsDuration("1h") + require.True(t, ok) + require.Equal(t, time.Hour, dur) + + _, ok = parseOpsDuration("invalid") + require.False(t, ok) +} + +func TestParseOpsTimeRange(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + now := time.Now().UTC() + startStr := now.Add(-time.Hour).Format(time.RFC3339) + endStr := now.Format(time.RFC3339) + c.Request = httptest.NewRequest(http.MethodGet, "/?start_time="+startStr+"&end_time="+endStr, nil) + start, end, err := parseOpsTimeRange(c, "1h") + require.NoError(t, err) + require.True(t, start.Before(end)) + + c2, _ := gin.CreateTestContext(w) + c2.Request = httptest.NewRequest(http.MethodGet, "/?start_time=bad", nil) + _, _, err = parseOpsTimeRange(c2, "1h") + require.Error(t, err) +} + +func TestParseOpsRealtimeWindow(t *testing.T) { + dur, label, ok := parseOpsRealtimeWindow("5m") + require.True(t, ok) + require.Equal(t, 5*time.Minute, dur) + require.Equal(t, "5min", label) + + _, _, ok = parseOpsRealtimeWindow("invalid") + require.False(t, ok) +} + +func TestPickThroughputBucketSeconds(t *testing.T) { + require.Equal(t, 60, pickThroughputBucketSeconds(30*time.Minute)) + require.Equal(t, 300, pickThroughputBucketSeconds(6*time.Hour)) + require.Equal(t, 3600, pickThroughputBucketSeconds(48*time.Hour)) +} + +func TestParseOpsQueryMode(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/?mode=raw", nil) + require.Equal(t, service.ParseOpsQueryMode("raw"), parseOpsQueryMode(c)) + require.Equal(t, service.OpsQueryMode(""), parseOpsQueryMode(nil)) +} + +func TestOpsAlertRuleValidation(t *testing.T) { + raw := map[string]json.RawMessage{ + "name": json.RawMessage(`"High error rate"`), + "metric_type": json.RawMessage(`"error_rate"`), + "operator": json.RawMessage(`">"`), + "threshold": json.RawMessage(`90`), + } + + validated, err := validateOpsAlertRulePayload(raw) + require.NoError(t, err) + require.Equal(t, "High error rate", validated.Name) + + _, err = validateOpsAlertRulePayload(map[string]json.RawMessage{}) + require.Error(t, err) + + require.True(t, isPercentOrRateMetric("error_rate")) + require.False(t, isPercentOrRateMetric("concurrency_queue_depth")) +} + +func TestOpsWSHelpers(t *testing.T) { + prefixes, invalid := parseTrustedProxyList("10.0.0.0/8,invalid") + require.Len(t, prefixes, 1) + require.Len(t, invalid, 1) + + host := hostWithoutPort("example.com:443") + require.Equal(t, "example.com", host) + + addr := netip.MustParseAddr("10.0.0.1") + require.True(t, isAddrInTrustedProxies(addr, prefixes)) + require.False(t, isAddrInTrustedProxies(netip.MustParseAddr("192.168.0.1"), prefixes)) +} diff --git a/backend/internal/handler/admin/admin_service_stub_test.go b/backend/internal/handler/admin/admin_service_stub_test.go new file mode 100644 index 00000000..b820a3fb --- /dev/null +++ b/backend/internal/handler/admin/admin_service_stub_test.go @@ -0,0 +1,294 @@ +package 
admin + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type stubAdminService struct { + users []service.User + apiKeys []service.APIKey + groups []service.Group + accounts []service.Account + proxies []service.Proxy + proxyCounts []service.ProxyWithAccountCount + redeems []service.RedeemCode +} + +func newStubAdminService() *stubAdminService { + now := time.Now().UTC() + user := service.User{ + ID: 1, + Email: "user@example.com", + Role: service.RoleUser, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + apiKey := service.APIKey{ + ID: 10, + UserID: user.ID, + Key: "sk-test", + Name: "test", + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + group := service.Group{ + ID: 2, + Name: "group", + Platform: service.PlatformAnthropic, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + account := service.Account{ + ID: 3, + Name: "account", + Platform: service.PlatformAnthropic, + Type: service.AccountTypeOAuth, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + proxy := service.Proxy{ + ID: 4, + Name: "proxy", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + CreatedAt: now, + UpdatedAt: now, + } + redeem := service.RedeemCode{ + ID: 5, + Code: "R-TEST", + Type: service.RedeemTypeBalance, + Value: 10, + Status: service.StatusUnused, + CreatedAt: now, + } + return &stubAdminService{ + users: []service.User{user}, + apiKeys: []service.APIKey{apiKey}, + groups: []service.Group{group}, + accounts: []service.Account{account}, + proxies: []service.Proxy{proxy}, + proxyCounts: []service.ProxyWithAccountCount{{Proxy: proxy, AccountCount: 1}}, + redeems: []service.RedeemCode{redeem}, + } +} + +func (s *stubAdminService) ListUsers(ctx context.Context, page, pageSize int, filters service.UserListFilters) ([]service.User, int64, error) { + return s.users, int64(len(s.users)), nil +} + +func (s *stubAdminService) GetUser(ctx context.Context, id int64) (*service.User, error) { + for i := range s.users { + if s.users[i].ID == id { + return &s.users[i], nil + } + } + user := service.User{ID: id, Email: "user@example.com", Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) CreateUser(ctx context.Context, input *service.CreateUserInput) (*service.User, error) { + user := service.User{ID: 100, Email: input.Email, Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) UpdateUser(ctx context.Context, id int64, input *service.UpdateUserInput) (*service.User, error) { + user := service.User{ID: id, Email: "updated@example.com", Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) DeleteUser(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) UpdateUserBalance(ctx context.Context, userID int64, balance float64, operation string, notes string) (*service.User, error) { + user := service.User{ID: userID, Balance: balance, Status: service.StatusActive} + return &user, nil +} + +func (s *stubAdminService) GetUserAPIKeys(ctx context.Context, userID int64, page, pageSize int) ([]service.APIKey, int64, error) { + return s.apiKeys, int64(len(s.apiKeys)), nil +} + +func (s *stubAdminService) GetUserUsageStats(ctx context.Context, userID int64, period string) (any, error) { + return map[string]any{"user_id": userID}, nil +} + +func (s *stubAdminService) ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, 
isExclusive *bool) ([]service.Group, int64, error) { + return s.groups, int64(len(s.groups)), nil +} + +func (s *stubAdminService) GetAllGroups(ctx context.Context) ([]service.Group, error) { + return s.groups, nil +} + +func (s *stubAdminService) GetAllGroupsByPlatform(ctx context.Context, platform string) ([]service.Group, error) { + return s.groups, nil +} + +func (s *stubAdminService) GetGroup(ctx context.Context, id int64) (*service.Group, error) { + group := service.Group{ID: id, Name: "group", Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) CreateGroup(ctx context.Context, input *service.CreateGroupInput) (*service.Group, error) { + group := service.Group{ID: 200, Name: input.Name, Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) UpdateGroup(ctx context.Context, id int64, input *service.UpdateGroupInput) (*service.Group, error) { + group := service.Group{ID: id, Name: input.Name, Status: service.StatusActive} + return &group, nil +} + +func (s *stubAdminService) DeleteGroup(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) GetGroupAPIKeys(ctx context.Context, groupID int64, page, pageSize int) ([]service.APIKey, int64, error) { + return s.apiKeys, int64(len(s.apiKeys)), nil +} + +func (s *stubAdminService) ListAccounts(ctx context.Context, page, pageSize int, platform, accountType, status, search string) ([]service.Account, int64, error) { + return s.accounts, int64(len(s.accounts)), nil +} + +func (s *stubAdminService) GetAccount(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) GetAccountsByIDs(ctx context.Context, ids []int64) ([]*service.Account, error) { + out := make([]*service.Account, 0, len(ids)) + for _, id := range ids { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + out = append(out, &account) + } + return out, nil +} + +func (s *stubAdminService) CreateAccount(ctx context.Context, input *service.CreateAccountInput) (*service.Account, error) { + account := service.Account{ID: 300, Name: input.Name, Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) UpdateAccount(ctx context.Context, id int64, input *service.UpdateAccountInput) (*service.Account, error) { + account := service.Account{ID: id, Name: input.Name, Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) DeleteAccount(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) RefreshAccountCredentials(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) ClearAccountError(ctx context.Context, id int64) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive} + return &account, nil +} + +func (s *stubAdminService) SetAccountError(ctx context.Context, id int64, errorMsg string) error { + return nil +} + +func (s *stubAdminService) SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*service.Account, error) { + account := service.Account{ID: id, Name: "account", Status: service.StatusActive, Schedulable: schedulable} + return &account, nil +} + +func (s *stubAdminService) BulkUpdateAccounts(ctx context.Context, input 
*service.BulkUpdateAccountsInput) (*service.BulkUpdateAccountsResult, error) { + return &service.BulkUpdateAccountsResult{Success: 1, Failed: 0, SuccessIDs: []int64{1}}, nil +} + +func (s *stubAdminService) ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]service.Proxy, int64, error) { + return s.proxies, int64(len(s.proxies)), nil +} + +func (s *stubAdminService) ListProxiesWithAccountCount(ctx context.Context, page, pageSize int, protocol, status, search string) ([]service.ProxyWithAccountCount, int64, error) { + return s.proxyCounts, int64(len(s.proxyCounts)), nil +} + +func (s *stubAdminService) GetAllProxies(ctx context.Context) ([]service.Proxy, error) { + return s.proxies, nil +} + +func (s *stubAdminService) GetAllProxiesWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { + return s.proxyCounts, nil +} + +func (s *stubAdminService) GetProxy(ctx context.Context, id int64) (*service.Proxy, error) { + proxy := service.Proxy{ID: id, Name: "proxy", Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) CreateProxy(ctx context.Context, input *service.CreateProxyInput) (*service.Proxy, error) { + proxy := service.Proxy{ID: 400, Name: input.Name, Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) UpdateProxy(ctx context.Context, id int64, input *service.UpdateProxyInput) (*service.Proxy, error) { + proxy := service.Proxy{ID: id, Name: input.Name, Status: service.StatusActive} + return &proxy, nil +} + +func (s *stubAdminService) DeleteProxy(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) BatchDeleteProxies(ctx context.Context, ids []int64) (*service.ProxyBatchDeleteResult, error) { + return &service.ProxyBatchDeleteResult{DeletedIDs: ids}, nil +} + +func (s *stubAdminService) GetProxyAccounts(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) { + return []service.ProxyAccountSummary{{ID: 1, Name: "account"}}, nil +} + +func (s *stubAdminService) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) { + return false, nil +} + +func (s *stubAdminService) TestProxy(ctx context.Context, id int64) (*service.ProxyTestResult, error) { + return &service.ProxyTestResult{Success: true, Message: "ok"}, nil +} + +func (s *stubAdminService) ListRedeemCodes(ctx context.Context, page, pageSize int, codeType, status, search string) ([]service.RedeemCode, int64, error) { + return s.redeems, int64(len(s.redeems)), nil +} + +func (s *stubAdminService) GetRedeemCode(ctx context.Context, id int64) (*service.RedeemCode, error) { + code := service.RedeemCode{ID: id, Code: "R-TEST", Status: service.StatusUnused} + return &code, nil +} + +func (s *stubAdminService) GenerateRedeemCodes(ctx context.Context, input *service.GenerateRedeemCodesInput) ([]service.RedeemCode, error) { + return s.redeems, nil +} + +func (s *stubAdminService) DeleteRedeemCode(ctx context.Context, id int64) error { + return nil +} + +func (s *stubAdminService) BatchDeleteRedeemCodes(ctx context.Context, ids []int64) (int64, error) { + return int64(len(ids)), nil +} + +func (s *stubAdminService) ExpireRedeemCode(ctx context.Context, id int64) (*service.RedeemCode, error) { + code := service.RedeemCode{ID: id, Code: "R-TEST", Status: service.StatusUsed} + return &code, nil +} + +// Ensure stub implements interface. 
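The var _ service.AdminService = (*stubAdminService)(nil) line that follows is Go's standard compile-time interface-satisfaction check: if the stub ever drifts from the interface (for example after a signature change like the ones in this patch), the test package stops compiling instead of failing at runtime. A self-contained illustration of the idiom, with stand-in names:

package main

// Greeter stands in for service.AdminService in this illustration.
type Greeter interface {
	Greet() string
}

type stubGreeter struct{}

func (stubGreeter) Greet() string { return "hi" }

// The blank identifier forces the conversion at compile time; the typed nil
// pointer costs nothing at runtime and constructs no value.
var _ Greeter = (*stubGreeter)(nil)

func main() {}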
+var _ service.AdminService = (*stubAdminService)(nil) diff --git a/backend/internal/handler/admin/announcement_handler.go b/backend/internal/handler/admin/announcement_handler.go new file mode 100644 index 00000000..0b5d0fbc --- /dev/null +++ b/backend/internal/handler/admin/announcement_handler.go @@ -0,0 +1,246 @@ +package admin + +import ( + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// AnnouncementHandler handles admin announcement management +type AnnouncementHandler struct { + announcementService *service.AnnouncementService +} + +// NewAnnouncementHandler creates a new admin announcement handler +func NewAnnouncementHandler(announcementService *service.AnnouncementService) *AnnouncementHandler { + return &AnnouncementHandler{ + announcementService: announcementService, + } +} + +type CreateAnnouncementRequest struct { + Title string `json:"title" binding:"required"` + Content string `json:"content" binding:"required"` + Status string `json:"status" binding:"omitempty,oneof=draft active archived"` + Targeting service.AnnouncementTargeting `json:"targeting"` + StartsAt *int64 `json:"starts_at"` // Unix seconds, 0/empty = immediate + EndsAt *int64 `json:"ends_at"` // Unix seconds, 0/empty = never +} + +type UpdateAnnouncementRequest struct { + Title *string `json:"title"` + Content *string `json:"content"` + Status *string `json:"status" binding:"omitempty,oneof=draft active archived"` + Targeting *service.AnnouncementTargeting `json:"targeting"` + StartsAt *int64 `json:"starts_at"` // Unix seconds, 0 = clear + EndsAt *int64 `json:"ends_at"` // Unix seconds, 0 = clear +} + +// List handles listing announcements with filters +// GET /api/v1/admin/announcements +func (h *AnnouncementHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + status := strings.TrimSpace(c.Query("status")) + search := strings.TrimSpace(c.Query("search")) + if len(search) > 200 { + search = search[:200] + } + + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + + items, paginationResult, err := h.announcementService.List( + c.Request.Context(), + params, + service.AnnouncementListFilters{Status: status, Search: search}, + ) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.Announcement, 0, len(items)) + for i := range items { + out = append(out, *dto.AnnouncementFromService(&items[i])) + } + response.Paginated(c, out, paginationResult.Total, page, pageSize) +} + +// GetByID handles getting an announcement by ID +// GET /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) GetByID(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + item, err := h.announcementService.GetByID(c.Request.Context(), announcementID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(item)) +} + +// Create handles creating a new announcement +// POST /api/v1/admin/announcements +func (h *AnnouncementHandler) Create(c *gin.Context) { + var req CreateAnnouncementRequest + if err := c.ShouldBindJSON(&req); err != nil { + 
response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + input := &service.CreateAnnouncementInput{ + Title: req.Title, + Content: req.Content, + Status: req.Status, + Targeting: req.Targeting, + ActorID: &subject.UserID, + } + + if req.StartsAt != nil && *req.StartsAt > 0 { + t := time.Unix(*req.StartsAt, 0) + input.StartsAt = &t + } + if req.EndsAt != nil && *req.EndsAt > 0 { + t := time.Unix(*req.EndsAt, 0) + input.EndsAt = &t + } + + created, err := h.announcementService.Create(c.Request.Context(), input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(created)) +} + +// Update handles updating an announcement +// PUT /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) Update(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + var req UpdateAnnouncementRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + input := &service.UpdateAnnouncementInput{ + Title: req.Title, + Content: req.Content, + Status: req.Status, + Targeting: req.Targeting, + ActorID: &subject.UserID, + } + + if req.StartsAt != nil { + if *req.StartsAt == 0 { + var cleared *time.Time = nil + input.StartsAt = &cleared + } else { + t := time.Unix(*req.StartsAt, 0) + ptr := &t + input.StartsAt = &ptr + } + } + + if req.EndsAt != nil { + if *req.EndsAt == 0 { + var cleared *time.Time = nil + input.EndsAt = &cleared + } else { + t := time.Unix(*req.EndsAt, 0) + ptr := &t + input.EndsAt = &ptr + } + } + + updated, err := h.announcementService.Update(c.Request.Context(), announcementID, input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AnnouncementFromService(updated)) +} + +// Delete handles deleting an announcement +// DELETE /api/v1/admin/announcements/:id +func (h *AnnouncementHandler) Delete(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + if err := h.announcementService.Delete(c.Request.Context(), announcementID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Announcement deleted successfully"}) +} + +// ListReadStatus handles listing users read status for an announcement +// GET /api/v1/admin/announcements/:id/read-status +func (h *AnnouncementHandler) ListReadStatus(c *gin.Context) { + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + page, pageSize := response.ParsePagination(c) + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + search := strings.TrimSpace(c.Query("search")) + if len(search) > 200 { + search = search[:200] + } + + items, paginationResult, err := h.announcementService.ListUserReadStatus( + c.Request.Context(), + announcementID, + params, + search, + ) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Paginated(c, 
items, paginationResult.Total, page, pageSize) +} diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go index 3f07403d..18365186 100644 --- a/backend/internal/handler/admin/dashboard_handler.go +++ b/backend/internal/handler/admin/dashboard_handler.go @@ -186,7 +186,7 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) { // GetUsageTrend handles getting usage trend data // GET /api/v1/admin/dashboard/trend -// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream, billing_type func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { startTime, endTime := parseTimeRange(c) granularity := c.DefaultQuery("granularity", "day") @@ -195,6 +195,7 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { var userID, apiKeyID, accountID, groupID int64 var model string var stream *bool + var billingType *int8 if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { @@ -224,8 +225,17 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { stream = &streamVal } } + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + if v, err := strconv.ParseInt(billingTypeStr, 10, 8); err == nil { + bt := int8(v) + billingType = &bt + } else { + response.BadRequest(c, "Invalid billing_type") + return + } + } - trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) + trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get usage trend") return @@ -241,13 +251,14 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { // GetModelStats handles getting model usage statistics // GET /api/v1/admin/dashboard/models -// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream +// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream, billing_type func (h *DashboardHandler) GetModelStats(c *gin.Context) { startTime, endTime := parseTimeRange(c) // Parse optional filter params var userID, apiKeyID, accountID, groupID int64 var stream *bool + var billingType *int8 if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { @@ -274,8 +285,17 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) { stream = &streamVal } } + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + if v, err := strconv.ParseInt(billingTypeStr, 10, 8); err == nil { + bt := int8(v) + billingType = &bt + } else { + response.BadRequest(c, "Invalid billing_type") + return + } + } - stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream) + stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get model statistics") return diff --git a/backend/internal/handler/admin/group_handler.go 
b/backend/internal/handler/admin/group_handler.go index 1df5af8c..32391418 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -98,9 +98,9 @@ func (h *GroupHandler) List(c *gin.Context) { return } - outGroups := make([]dto.Group, 0, len(groups)) + outGroups := make([]dto.AdminGroup, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *dto.GroupFromService(&groups[i])) + outGroups = append(outGroups, *dto.GroupFromServiceAdmin(&groups[i])) } response.Paginated(c, outGroups, total, page, pageSize) } @@ -124,9 +124,9 @@ func (h *GroupHandler) GetAll(c *gin.Context) { return } - outGroups := make([]dto.Group, 0, len(groups)) + outGroups := make([]dto.AdminGroup, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *dto.GroupFromService(&groups[i])) + outGroups = append(outGroups, *dto.GroupFromServiceAdmin(&groups[i])) } response.Success(c, outGroups) } @@ -146,7 +146,7 @@ func (h *GroupHandler) GetByID(c *gin.Context) { return } - response.Success(c, dto.GroupFromService(group)) + response.Success(c, dto.GroupFromServiceAdmin(group)) } // Create handles creating a new group @@ -183,7 +183,7 @@ func (h *GroupHandler) Create(c *gin.Context) { return } - response.Success(c, dto.GroupFromService(group)) + response.Success(c, dto.GroupFromServiceAdmin(group)) } // Update handles updating a group @@ -227,7 +227,7 @@ func (h *GroupHandler) Update(c *gin.Context) { return } - response.Success(c, dto.GroupFromService(group)) + response.Success(c, dto.GroupFromServiceAdmin(group)) } // Delete handles deleting a group diff --git a/backend/internal/handler/admin/redeem_handler.go b/backend/internal/handler/admin/redeem_handler.go index 5b3229b6..f1b68334 100644 --- a/backend/internal/handler/admin/redeem_handler.go +++ b/backend/internal/handler/admin/redeem_handler.go @@ -54,9 +54,9 @@ func (h *RedeemHandler) List(c *gin.Context) { return } - out := make([]dto.RedeemCode, 0, len(codes)) + out := make([]dto.AdminRedeemCode, 0, len(codes)) for i := range codes { - out = append(out, *dto.RedeemCodeFromService(&codes[i])) + out = append(out, *dto.RedeemCodeFromServiceAdmin(&codes[i])) } response.Paginated(c, out, total, page, pageSize) } @@ -76,7 +76,7 @@ func (h *RedeemHandler) GetByID(c *gin.Context) { return } - response.Success(c, dto.RedeemCodeFromService(code)) + response.Success(c, dto.RedeemCodeFromServiceAdmin(code)) } // Generate handles generating new redeem codes @@ -100,9 +100,9 @@ func (h *RedeemHandler) Generate(c *gin.Context) { return } - out := make([]dto.RedeemCode, 0, len(codes)) + out := make([]dto.AdminRedeemCode, 0, len(codes)) for i := range codes { - out = append(out, *dto.RedeemCodeFromService(&codes[i])) + out = append(out, *dto.RedeemCodeFromServiceAdmin(&codes[i])) } response.Success(c, out) } @@ -163,7 +163,7 @@ func (h *RedeemHandler) Expire(c *gin.Context) { return } - response.Success(c, dto.RedeemCodeFromService(code)) + response.Success(c, dto.RedeemCodeFromServiceAdmin(code)) } // GetStats handles getting redeem code statistics diff --git a/backend/internal/handler/admin/setting_handler.go b/backend/internal/handler/admin/setting_handler.go index 6666ce4e..cdad3659 100644 --- a/backend/internal/handler/admin/setting_handler.go +++ b/backend/internal/handler/admin/setting_handler.go @@ -47,6 +47,10 @@ func (h *SettingHandler) GetSettings(c *gin.Context) { response.Success(c, dto.SystemSettings{ RegistrationEnabled: settings.RegistrationEnabled, EmailVerifyEnabled: 
settings.EmailVerifyEnabled,
+		PromoCodeEnabled:            settings.PromoCodeEnabled,
+		PasswordResetEnabled:        settings.PasswordResetEnabled,
+		TotpEnabled:                 settings.TotpEnabled,
+		TotpEncryptionKeyConfigured: h.settingService.IsTotpEncryptionKeyConfigured(),
 		SMTPHost:     settings.SMTPHost,
 		SMTPPort:     settings.SMTPPort,
 		SMTPUsername: settings.SMTPUsername,
@@ -68,6 +72,9 @@
 		ContactInfo: settings.ContactInfo,
 		DocURL:      settings.DocURL,
 		HomeContent: settings.HomeContent,
+		HideCcsImportButton:         settings.HideCcsImportButton,
+		PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled,
+		PurchaseSubscriptionURL:     settings.PurchaseSubscriptionURL,
 		DefaultConcurrency:  settings.DefaultConcurrency,
 		DefaultBalance:      settings.DefaultBalance,
 		EnableModelFallback: settings.EnableModelFallback,
@@ -87,8 +94,11 @@
 
 // UpdateSettingsRequest is the request payload for updating settings
 type UpdateSettingsRequest struct {
 	// Registration settings
-	RegistrationEnabled bool `json:"registration_enabled"`
-	EmailVerifyEnabled  bool `json:"email_verify_enabled"`
+	RegistrationEnabled  bool `json:"registration_enabled"`
+	EmailVerifyEnabled   bool `json:"email_verify_enabled"`
+	PromoCodeEnabled     bool `json:"promo_code_enabled"`
+	PasswordResetEnabled bool `json:"password_reset_enabled"`
+	TotpEnabled          bool `json:"totp_enabled"` // TOTP two-factor authentication
 
 	// Email service settings
 	SMTPHost string `json:"smtp_host"`
@@ -111,13 +121,16 @@ type UpdateSettingsRequest struct {
 	LinuxDoConnectRedirectURL string `json:"linuxdo_connect_redirect_url"`
 
 	// OEM settings
-	SiteName     string `json:"site_name"`
-	SiteLogo     string `json:"site_logo"`
-	SiteSubtitle string `json:"site_subtitle"`
-	APIBaseURL   string `json:"api_base_url"`
-	ContactInfo  string `json:"contact_info"`
-	DocURL       string `json:"doc_url"`
-	HomeContent  string `json:"home_content"`
+	SiteName                    string  `json:"site_name"`
+	SiteLogo                    string  `json:"site_logo"`
+	SiteSubtitle                string  `json:"site_subtitle"`
+	APIBaseURL                  string  `json:"api_base_url"`
+	ContactInfo                 string  `json:"contact_info"`
+	DocURL                      string  `json:"doc_url"`
+	HomeContent                 string  `json:"home_content"`
+	HideCcsImportButton         bool    `json:"hide_ccs_import_button"`
+	PurchaseSubscriptionEnabled *bool   `json:"purchase_subscription_enabled"`
+	PurchaseSubscriptionURL     *string `json:"purchase_subscription_url"`
 
 	// Default configuration
 	DefaultConcurrency int `json:"default_concurrency"`
@@ -194,6 +207,16 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
 		}
 	}
 
+	// TOTP two-factor authentication validation
+	// TOTP may only be enabled once the encryption key has been configured manually
+	if req.TotpEnabled && !previousSettings.TotpEnabled {
+		// Attempting to enable TOTP: check that the encryption key has been configured manually
+		if !h.settingService.IsTotpEncryptionKeyConfigured() {
+			response.BadRequest(c, "Cannot enable TOTP: TOTP_ENCRYPTION_KEY environment variable must be configured first. Generate a key with 'openssl rand -hex 32' and set it in your environment.")
+			return
+		}
+	}
+
 	// LinuxDo Connect parameter validation
 	if req.LinuxDoConnectEnabled {
 		req.LinuxDoConnectClientID = strings.TrimSpace(req.LinuxDoConnectClientID)
@@ -223,6 +246,34 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
 		}
 	}
 
+	// "Purchase subscription" page configuration validation
+	purchaseEnabled := previousSettings.PurchaseSubscriptionEnabled
+	if req.PurchaseSubscriptionEnabled != nil {
+		purchaseEnabled = *req.PurchaseSubscriptionEnabled
+	}
+	purchaseURL := previousSettings.PurchaseSubscriptionURL
+	if req.PurchaseSubscriptionURL != nil {
+		purchaseURL = strings.TrimSpace(*req.PurchaseSubscriptionURL)
+	}
+
+	// - When enabled, the URL must be non-empty and valid
+	// - When disabled, an empty URL is allowed; if one is provided, still run basic validation to catch misconfiguration
+	if purchaseEnabled {
+		if purchaseURL == "" {
+			response.BadRequest(c, "Purchase Subscription URL is required when enabled")
+			return
+		}
+		if err := config.ValidateAbsoluteHTTPURL(purchaseURL); err != nil {
+			response.BadRequest(c, "Purchase Subscription URL must be an absolute http(s) URL")
+			return
+		}
+	} else if purchaseURL != "" {
+		if err := config.ValidateAbsoluteHTTPURL(purchaseURL); err != nil {
+			response.BadRequest(c, "Purchase Subscription URL must be an absolute http(s) URL")
+			return
+		}
+	}
+
 	// Ops metrics collector interval validation (seconds).
 	if req.OpsMetricsIntervalSeconds != nil {
 		v := *req.OpsMetricsIntervalSeconds
@@ -236,38 +287,44 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) {
 	}
 
 	settings := &service.SystemSettings{
-		RegistrationEnabled:        req.RegistrationEnabled,
-		EmailVerifyEnabled:         req.EmailVerifyEnabled,
-		SMTPHost:                   req.SMTPHost,
-		SMTPPort:                   req.SMTPPort,
-		SMTPUsername:               req.SMTPUsername,
-		SMTPPassword:               req.SMTPPassword,
-		SMTPFrom:                   req.SMTPFrom,
-		SMTPFromName:               req.SMTPFromName,
-		SMTPUseTLS:                 req.SMTPUseTLS,
-		TurnstileEnabled:           req.TurnstileEnabled,
-		TurnstileSiteKey:           req.TurnstileSiteKey,
-		TurnstileSecretKey:         req.TurnstileSecretKey,
-		LinuxDoConnectEnabled:      req.LinuxDoConnectEnabled,
-		LinuxDoConnectClientID:     req.LinuxDoConnectClientID,
-		LinuxDoConnectClientSecret: req.LinuxDoConnectClientSecret,
-		LinuxDoConnectRedirectURL:  req.LinuxDoConnectRedirectURL,
-		SiteName:                   req.SiteName,
-		SiteLogo:                   req.SiteLogo,
-		SiteSubtitle:               req.SiteSubtitle,
-		APIBaseURL:                 req.APIBaseURL,
-		ContactInfo:                req.ContactInfo,
-		DocURL:                     req.DocURL,
-		HomeContent:                req.HomeContent,
-		DefaultConcurrency:         req.DefaultConcurrency,
-		DefaultBalance:             req.DefaultBalance,
-		EnableModelFallback:        req.EnableModelFallback,
-		FallbackModelAnthropic:     req.FallbackModelAnthropic,
-		FallbackModelOpenAI:        req.FallbackModelOpenAI,
-		FallbackModelGemini:        req.FallbackModelGemini,
-		FallbackModelAntigravity:   req.FallbackModelAntigravity,
-		EnableIdentityPatch:        req.EnableIdentityPatch,
-		IdentityPatchPrompt:        req.IdentityPatchPrompt,
+		RegistrationEnabled:         req.RegistrationEnabled,
+		EmailVerifyEnabled:          req.EmailVerifyEnabled,
+		PromoCodeEnabled:            req.PromoCodeEnabled,
+		PasswordResetEnabled:        req.PasswordResetEnabled,
+		TotpEnabled:                 req.TotpEnabled,
+		SMTPHost:                    req.SMTPHost,
+		SMTPPort:                    req.SMTPPort,
+		SMTPUsername:                req.SMTPUsername,
+		SMTPPassword:                req.SMTPPassword,
+		SMTPFrom:                    req.SMTPFrom,
+		SMTPFromName:                req.SMTPFromName,
+		SMTPUseTLS:                  req.SMTPUseTLS,
+		TurnstileEnabled:            req.TurnstileEnabled,
+		TurnstileSiteKey:            req.TurnstileSiteKey,
+		TurnstileSecretKey:          req.TurnstileSecretKey,
+		LinuxDoConnectEnabled:       req.LinuxDoConnectEnabled,
+		LinuxDoConnectClientID:      req.LinuxDoConnectClientID,
+		LinuxDoConnectClientSecret:  req.LinuxDoConnectClientSecret,
+		LinuxDoConnectRedirectURL: 
req.LinuxDoConnectRedirectURL, + SiteName: req.SiteName, + SiteLogo: req.SiteLogo, + SiteSubtitle: req.SiteSubtitle, + APIBaseURL: req.APIBaseURL, + ContactInfo: req.ContactInfo, + DocURL: req.DocURL, + HomeContent: req.HomeContent, + HideCcsImportButton: req.HideCcsImportButton, + PurchaseSubscriptionEnabled: purchaseEnabled, + PurchaseSubscriptionURL: purchaseURL, + DefaultConcurrency: req.DefaultConcurrency, + DefaultBalance: req.DefaultBalance, + EnableModelFallback: req.EnableModelFallback, + FallbackModelAnthropic: req.FallbackModelAnthropic, + FallbackModelOpenAI: req.FallbackModelOpenAI, + FallbackModelGemini: req.FallbackModelGemini, + FallbackModelAntigravity: req.FallbackModelAntigravity, + EnableIdentityPatch: req.EnableIdentityPatch, + IdentityPatchPrompt: req.IdentityPatchPrompt, OpsMonitoringEnabled: func() bool { if req.OpsMonitoringEnabled != nil { return *req.OpsMonitoringEnabled @@ -311,6 +368,10 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { response.Success(c, dto.SystemSettings{ RegistrationEnabled: updatedSettings.RegistrationEnabled, EmailVerifyEnabled: updatedSettings.EmailVerifyEnabled, + PromoCodeEnabled: updatedSettings.PromoCodeEnabled, + PasswordResetEnabled: updatedSettings.PasswordResetEnabled, + TotpEnabled: updatedSettings.TotpEnabled, + TotpEncryptionKeyConfigured: h.settingService.IsTotpEncryptionKeyConfigured(), SMTPHost: updatedSettings.SMTPHost, SMTPPort: updatedSettings.SMTPPort, SMTPUsername: updatedSettings.SMTPUsername, @@ -332,6 +393,9 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { ContactInfo: updatedSettings.ContactInfo, DocURL: updatedSettings.DocURL, HomeContent: updatedSettings.HomeContent, + HideCcsImportButton: updatedSettings.HideCcsImportButton, + PurchaseSubscriptionEnabled: updatedSettings.PurchaseSubscriptionEnabled, + PurchaseSubscriptionURL: updatedSettings.PurchaseSubscriptionURL, DefaultConcurrency: updatedSettings.DefaultConcurrency, DefaultBalance: updatedSettings.DefaultBalance, EnableModelFallback: updatedSettings.EnableModelFallback, @@ -376,6 +440,12 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings, if before.EmailVerifyEnabled != after.EmailVerifyEnabled { changed = append(changed, "email_verify_enabled") } + if before.PasswordResetEnabled != after.PasswordResetEnabled { + changed = append(changed, "password_reset_enabled") + } + if before.TotpEnabled != after.TotpEnabled { + changed = append(changed, "totp_enabled") + } if before.SMTPHost != after.SMTPHost { changed = append(changed, "smtp_host") } @@ -439,6 +509,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings, if before.HomeContent != after.HomeContent { changed = append(changed, "home_content") } + if before.HideCcsImportButton != after.HideCcsImportButton { + changed = append(changed, "hide_ccs_import_button") + } if before.DefaultConcurrency != after.DefaultConcurrency { changed = append(changed, "default_concurrency") } diff --git a/backend/internal/handler/admin/subscription_handler.go b/backend/internal/handler/admin/subscription_handler.go index 08db999a..51995ab1 100644 --- a/backend/internal/handler/admin/subscription_handler.go +++ b/backend/internal/handler/admin/subscription_handler.go @@ -53,9 +53,9 @@ type BulkAssignSubscriptionRequest struct { Notes string `json:"notes"` } -// ExtendSubscriptionRequest represents extend subscription request -type ExtendSubscriptionRequest struct { - Days int `json:"days" binding:"required,min=1,max=36500"` // max 100 years +// 
AdjustSubscriptionRequest represents adjust subscription request (extend or shorten) +type AdjustSubscriptionRequest struct { + Days int `json:"days" binding:"required,min=-36500,max=36500"` // negative to shorten, positive to extend } // List handles listing all subscriptions with pagination and filters @@ -77,15 +77,19 @@ func (h *SubscriptionHandler) List(c *gin.Context) { } status := c.Query("status") - subscriptions, pagination, err := h.subscriptionService.List(c.Request.Context(), page, pageSize, userID, groupID, status) + // Parse sorting parameters + sortBy := c.DefaultQuery("sort_by", "created_at") + sortOrder := c.DefaultQuery("sort_order", "desc") + + subscriptions, pagination, err := h.subscriptionService.List(c.Request.Context(), page, pageSize, userID, groupID, status, sortBy, sortOrder) if err != nil { response.ErrorFrom(c, err) return } - out := make([]dto.UserSubscription, 0, len(subscriptions)) + out := make([]dto.AdminUserSubscription, 0, len(subscriptions)) for i := range subscriptions { - out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + out = append(out, *dto.UserSubscriptionFromServiceAdmin(&subscriptions[i])) } response.PaginatedWithResult(c, out, toResponsePagination(pagination)) } @@ -105,7 +109,7 @@ func (h *SubscriptionHandler) GetByID(c *gin.Context) { return } - response.Success(c, dto.UserSubscriptionFromService(subscription)) + response.Success(c, dto.UserSubscriptionFromServiceAdmin(subscription)) } // GetProgress handles getting subscription usage progress @@ -150,7 +154,7 @@ func (h *SubscriptionHandler) Assign(c *gin.Context) { return } - response.Success(c, dto.UserSubscriptionFromService(subscription)) + response.Success(c, dto.UserSubscriptionFromServiceAdmin(subscription)) } // BulkAssign handles bulk assigning subscriptions to multiple users @@ -180,7 +184,7 @@ func (h *SubscriptionHandler) BulkAssign(c *gin.Context) { response.Success(c, dto.BulkAssignResultFromService(result)) } -// Extend handles extending a subscription +// Extend handles adjusting a subscription (extend or shorten) // POST /api/v1/admin/subscriptions/:id/extend func (h *SubscriptionHandler) Extend(c *gin.Context) { subscriptionID, err := strconv.ParseInt(c.Param("id"), 10, 64) @@ -189,7 +193,7 @@ func (h *SubscriptionHandler) Extend(c *gin.Context) { return } - var req ExtendSubscriptionRequest + var req AdjustSubscriptionRequest if err := c.ShouldBindJSON(&req); err != nil { response.BadRequest(c, "Invalid request: "+err.Error()) return @@ -201,7 +205,7 @@ func (h *SubscriptionHandler) Extend(c *gin.Context) { return } - response.Success(c, dto.UserSubscriptionFromService(subscription)) + response.Success(c, dto.UserSubscriptionFromServiceAdmin(subscription)) } // Revoke handles revoking a subscription @@ -239,9 +243,9 @@ func (h *SubscriptionHandler) ListByGroup(c *gin.Context) { return } - out := make([]dto.UserSubscription, 0, len(subscriptions)) + out := make([]dto.AdminUserSubscription, 0, len(subscriptions)) for i := range subscriptions { - out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + out = append(out, *dto.UserSubscriptionFromServiceAdmin(&subscriptions[i])) } response.PaginatedWithResult(c, out, toResponsePagination(pagination)) } @@ -261,9 +265,9 @@ func (h *SubscriptionHandler) ListByUser(c *gin.Context) { return } - out := make([]dto.UserSubscription, 0, len(subscriptions)) + out := make([]dto.AdminUserSubscription, 0, len(subscriptions)) for i := range subscriptions { - out = append(out, 
*dto.UserSubscriptionFromService(&subscriptions[i])) + out = append(out, *dto.UserSubscriptionFromServiceAdmin(&subscriptions[i])) } response.Success(c, out) } diff --git a/backend/internal/handler/admin/usage_cleanup_handler_test.go b/backend/internal/handler/admin/usage_cleanup_handler_test.go new file mode 100644 index 00000000..ed1c7cc2 --- /dev/null +++ b/backend/internal/handler/admin/usage_cleanup_handler_test.go @@ -0,0 +1,377 @@ +package admin + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type cleanupRepoStub struct { + mu sync.Mutex + created []*service.UsageCleanupTask + listTasks []service.UsageCleanupTask + listResult *pagination.PaginationResult + listErr error + statusByID map[int64]string +} + +func (s *cleanupRepoStub) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error { + if task == nil { + return nil + } + s.mu.Lock() + defer s.mu.Unlock() + if task.ID == 0 { + task.ID = int64(len(s.created) + 1) + } + if task.CreatedAt.IsZero() { + task.CreatedAt = time.Now().UTC() + } + task.UpdatedAt = task.CreatedAt + clone := *task + s.created = append(s.created, &clone) + return nil +} + +func (s *cleanupRepoStub) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.listTasks, s.listResult, s.listErr +} + +func (s *cleanupRepoStub) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*service.UsageCleanupTask, error) { + return nil, nil +} + +func (s *cleanupRepoStub) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusByID == nil { + return "", sql.ErrNoRows + } + status, ok := s.statusByID[taskID] + if !ok { + return "", sql.ErrNoRows + } + return status, nil +} + +func (s *cleanupRepoStub) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + return nil +} + +func (s *cleanupRepoStub) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + status := s.statusByID[taskID] + if status != service.UsageCleanupStatusPending && status != service.UsageCleanupStatusRunning { + return false, nil + } + s.statusByID[taskID] = service.UsageCleanupStatusCanceled + return true, nil +} + +func (s *cleanupRepoStub) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + return nil +} + +func (s *cleanupRepoStub) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + return nil +} + +func (s *cleanupRepoStub) DeleteUsageLogsBatch(ctx context.Context, filters service.UsageCleanupFilters, limit int) (int64, error) { + return 0, nil +} + +var _ service.UsageCleanupRepository = (*cleanupRepoStub)(nil) + +func setupCleanupRouter(cleanupService *service.UsageCleanupService, userID int64) *gin.Engine { + gin.SetMode(gin.TestMode) + router := gin.New() + if userID > 0 
{ + router.Use(func(c *gin.Context) { + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{UserID: userID}) + c.Next() + }) + } + + handler := NewUsageHandler(nil, nil, nil, cleanupService) + router.POST("/api/v1/admin/usage/cleanup-tasks", handler.CreateCleanupTask) + router.GET("/api/v1/admin/usage/cleanup-tasks", handler.ListCleanupTasks) + router.POST("/api/v1/admin/usage/cleanup-tasks/:id/cancel", handler.CancelCleanupTask) + return router +} + +func TestUsageHandlerCreateCleanupTaskUnauthorized(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 0) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString(`{}`)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusUnauthorized, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskUnavailable(t *testing.T) { + router := setupCleanupRouter(nil, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString(`{}`)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusServiceUnavailable, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskBindError(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewBufferString("{bad-json")) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskMissingRange(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-01-01", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskInvalidDate(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-13-01", + "end_date": "2024-01-02", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + 
recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskInvalidEndDate(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-01-01", + "end_date": "2024-02-40", + "timezone": "UTC", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 99) + + payload := map[string]any{ + "start_date": " 2024-01-01 ", + "end_date": "2024-01-02", + "timezone": "UTC", + "model": "gpt-4", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusOK, recorder.Code) + + var resp response.Response + require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp)) + require.Equal(t, 0, resp.Code) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.created, 1) + created := repo.created[0] + require.Equal(t, int64(99), created.CreatedBy) + require.NotNil(t, created.Filters.Model) + require.Equal(t, "gpt-4", *created.Filters.Model) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC).Add(24*time.Hour - time.Nanosecond) + require.True(t, created.Filters.StartTime.Equal(start)) + require.True(t, created.Filters.EndTime.Equal(end)) +} + +func TestUsageHandlerListCleanupTasksUnavailable(t *testing.T) { + router := setupCleanupRouter(nil, 0) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusServiceUnavailable, recorder.Code) +} + +func TestUsageHandlerListCleanupTasksSuccess(t *testing.T) { + repo := &cleanupRepoStub{} + repo.listTasks = []service.UsageCleanupTask{ + { + ID: 7, + Status: service.UsageCleanupStatusSucceeded, + CreatedBy: 4, + }, + } + repo.listResult = &pagination.PaginationResult{Total: 1, Page: 1, PageSize: 20, Pages: 1} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusOK, recorder.Code) + + var resp struct { + Code int `json:"code"` + Data struct { + Items []dto.UsageCleanupTask 
`json:"items"` + Total int64 `json:"total"` + Page int `json:"page"` + } `json:"data"` + } + require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp)) + require.Equal(t, 0, resp.Code) + require.Len(t, resp.Data.Items, 1) + require.Equal(t, int64(7), resp.Data.Items[0].ID) + require.Equal(t, int64(1), resp.Data.Total) + require.Equal(t, 1, resp.Data.Page) +} + +func TestUsageHandlerListCleanupTasksError(t *testing.T) { + repo := &cleanupRepoStub{listErr: errors.New("boom")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/usage/cleanup-tasks", nil) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusInternalServerError, recorder.Code) +} + +func TestUsageHandlerCancelCleanupTaskUnauthorized(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 0) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/1/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/999/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusNotFound, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskConflict(t *testing.T) { + repo := &cleanupRepoStub{statusByID: map[int64]string{2: service.UsageCleanupStatusSucceeded}} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/2/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusConflict, rec.Code) +} + +func TestUsageHandlerCancelCleanupTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{statusByID: map[int64]string{3: service.UsageCleanupStatusPending}} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 1) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks/3/cancel", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) +} diff --git a/backend/internal/handler/admin/usage_handler.go b/backend/internal/handler/admin/usage_handler.go index c7b983f1..3f3238dd 100644 --- a/backend/internal/handler/admin/usage_handler.go +++ b/backend/internal/handler/admin/usage_handler.go @@ -1,7 +1,10 @@ package admin import ( + "log" + "net/http" "strconv" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/handler/dto" @@ -9,6 +12,7 @@ import ( 
"github.com/Wei-Shaw/sub2api/internal/pkg/response" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" @@ -16,9 +20,10 @@ import ( // UsageHandler handles admin usage-related requests type UsageHandler struct { - usageService *service.UsageService - apiKeyService *service.APIKeyService - adminService service.AdminService + usageService *service.UsageService + apiKeyService *service.APIKeyService + adminService service.AdminService + cleanupService *service.UsageCleanupService } // NewUsageHandler creates a new admin usage handler @@ -26,14 +31,30 @@ func NewUsageHandler( usageService *service.UsageService, apiKeyService *service.APIKeyService, adminService service.AdminService, + cleanupService *service.UsageCleanupService, ) *UsageHandler { return &UsageHandler{ - usageService: usageService, - apiKeyService: apiKeyService, - adminService: adminService, + usageService: usageService, + apiKeyService: apiKeyService, + adminService: adminService, + cleanupService: cleanupService, } } +// CreateUsageCleanupTaskRequest represents cleanup task creation request +type CreateUsageCleanupTaskRequest struct { + StartDate string `json:"start_date"` + EndDate string `json:"end_date"` + UserID *int64 `json:"user_id"` + APIKeyID *int64 `json:"api_key_id"` + AccountID *int64 `json:"account_id"` + GroupID *int64 `json:"group_id"` + Model *string `json:"model"` + Stream *bool `json:"stream"` + BillingType *int8 `json:"billing_type"` + Timezone string `json:"timezone"` +} + // List handles listing all usage records with filters // GET /api/v1/admin/usage func (h *UsageHandler) List(c *gin.Context) { @@ -142,7 +163,7 @@ func (h *UsageHandler) List(c *gin.Context) { return } - out := make([]dto.UsageLog, 0, len(records)) + out := make([]dto.AdminUsageLog, 0, len(records)) for i := range records { out = append(out, *dto.UsageLogFromServiceAdmin(&records[i])) } @@ -344,3 +365,162 @@ func (h *UsageHandler) SearchAPIKeys(c *gin.Context) { response.Success(c, result) } + +// ListCleanupTasks handles listing usage cleanup tasks +// GET /api/v1/admin/usage/cleanup-tasks +func (h *UsageHandler) ListCleanupTasks(c *gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + operator := int64(0) + if subject, ok := middleware.GetAuthSubjectFromContext(c); ok { + operator = subject.UserID + } + page, pageSize := response.ParsePagination(c) + log.Printf("[UsageCleanup] 请求清理任务列表: operator=%d page=%d page_size=%d", operator, page, pageSize) + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + tasks, result, err := h.cleanupService.ListTasks(c.Request.Context(), params) + if err != nil { + log.Printf("[UsageCleanup] 查询清理任务列表失败: operator=%d page=%d page_size=%d err=%v", operator, page, pageSize, err) + response.ErrorFrom(c, err) + return + } + out := make([]dto.UsageCleanupTask, 0, len(tasks)) + for i := range tasks { + out = append(out, *dto.UsageCleanupTaskFromService(&tasks[i])) + } + log.Printf("[UsageCleanup] 返回清理任务列表: operator=%d total=%d items=%d page=%d page_size=%d", operator, result.Total, len(out), page, pageSize) + response.Paginated(c, out, result.Total, page, pageSize) +} + +// CreateCleanupTask handles creating a usage cleanup task +// POST /api/v1/admin/usage/cleanup-tasks +func (h *UsageHandler) CreateCleanupTask(c 
*gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Unauthorized(c, "Unauthorized") + return + } + + var req CreateUsageCleanupTaskRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + req.StartDate = strings.TrimSpace(req.StartDate) + req.EndDate = strings.TrimSpace(req.EndDate) + if req.StartDate == "" || req.EndDate == "" { + response.BadRequest(c, "start_date and end_date are required") + return + } + + startTime, err := timezone.ParseInUserLocation("2006-01-02", req.StartDate, req.Timezone) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + endTime, err := timezone.ParseInUserLocation("2006-01-02", req.EndDate, req.Timezone) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + endTime = endTime.Add(24*time.Hour - time.Nanosecond) + + filters := service.UsageCleanupFilters{ + StartTime: startTime, + EndTime: endTime, + UserID: req.UserID, + APIKeyID: req.APIKeyID, + AccountID: req.AccountID, + GroupID: req.GroupID, + Model: req.Model, + Stream: req.Stream, + BillingType: req.BillingType, + } + + var userID any + if filters.UserID != nil { + userID = *filters.UserID + } + var apiKeyID any + if filters.APIKeyID != nil { + apiKeyID = *filters.APIKeyID + } + var accountID any + if filters.AccountID != nil { + accountID = *filters.AccountID + } + var groupID any + if filters.GroupID != nil { + groupID = *filters.GroupID + } + var model any + if filters.Model != nil { + model = *filters.Model + } + var stream any + if filters.Stream != nil { + stream = *filters.Stream + } + var billingType any + if filters.BillingType != nil { + billingType = *filters.BillingType + } + + log.Printf("[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v stream=%v billing_type=%v tz=%q", + subject.UserID, + filters.StartTime.Format(time.RFC3339), + filters.EndTime.Format(time.RFC3339), + userID, + apiKeyID, + accountID, + groupID, + model, + stream, + billingType, + req.Timezone, + ) + + task, err := h.cleanupService.CreateTask(c.Request.Context(), filters, subject.UserID) + if err != nil { + log.Printf("[UsageCleanup] 创建清理任务失败: operator=%d err=%v", subject.UserID, err) + response.ErrorFrom(c, err) + return + } + + log.Printf("[UsageCleanup] 清理任务已创建: task=%d operator=%d status=%s", task.ID, subject.UserID, task.Status) + response.Success(c, dto.UsageCleanupTaskFromService(task)) +} + +// CancelCleanupTask handles canceling a usage cleanup task +// POST /api/v1/admin/usage/cleanup-tasks/:id/cancel +func (h *UsageHandler) CancelCleanupTask(c *gin.Context) { + if h.cleanupService == nil { + response.Error(c, http.StatusServiceUnavailable, "Usage cleanup service unavailable") + return + } + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Unauthorized(c, "Unauthorized") + return + } + idStr := strings.TrimSpace(c.Param("id")) + taskID, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || taskID <= 0 { + response.BadRequest(c, "Invalid task id") + return + } + log.Printf("[UsageCleanup] 请求取消清理任务: task=%d operator=%d", taskID, subject.UserID) + if err := h.cleanupService.CancelTask(c.Request.Context(), taskID, subject.UserID); err != 
nil { + log.Printf("[UsageCleanup] 取消清理任务失败: task=%d operator=%d err=%v", taskID, subject.UserID, err) + response.ErrorFrom(c, err) + return + } + log.Printf("[UsageCleanup] 清理任务已取消: task=%d operator=%d", taskID, subject.UserID) + response.Success(c, gin.H{"id": taskID, "status": service.UsageCleanupStatusCanceled}) +} diff --git a/backend/internal/handler/admin/user_handler.go b/backend/internal/handler/admin/user_handler.go index 38cc8acd..9a5a691f 100644 --- a/backend/internal/handler/admin/user_handler.go +++ b/backend/internal/handler/admin/user_handler.go @@ -84,9 +84,9 @@ func (h *UserHandler) List(c *gin.Context) { return } - out := make([]dto.User, 0, len(users)) + out := make([]dto.AdminUser, 0, len(users)) for i := range users { - out = append(out, *dto.UserFromService(&users[i])) + out = append(out, *dto.UserFromServiceAdmin(&users[i])) } response.Paginated(c, out, total, page, pageSize) } @@ -129,7 +129,7 @@ func (h *UserHandler) GetByID(c *gin.Context) { return } - response.Success(c, dto.UserFromService(user)) + response.Success(c, dto.UserFromServiceAdmin(user)) } // Create handles creating a new user @@ -155,7 +155,7 @@ func (h *UserHandler) Create(c *gin.Context) { return } - response.Success(c, dto.UserFromService(user)) + response.Success(c, dto.UserFromServiceAdmin(user)) } // Update handles updating a user @@ -189,7 +189,7 @@ func (h *UserHandler) Update(c *gin.Context) { return } - response.Success(c, dto.UserFromService(user)) + response.Success(c, dto.UserFromServiceAdmin(user)) } // Delete handles deleting a user @@ -231,7 +231,7 @@ func (h *UserHandler) UpdateBalance(c *gin.Context) { return } - response.Success(c, dto.UserFromService(user)) + response.Success(c, dto.UserFromServiceAdmin(user)) } // GetUserAPIKeys handles getting user's API keys diff --git a/backend/internal/handler/announcement_handler.go b/backend/internal/handler/announcement_handler.go new file mode 100644 index 00000000..72823eaf --- /dev/null +++ b/backend/internal/handler/announcement_handler.go @@ -0,0 +1,81 @@ +package handler + +import ( + "strconv" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// AnnouncementHandler handles user announcement operations +type AnnouncementHandler struct { + announcementService *service.AnnouncementService +} + +// NewAnnouncementHandler creates a new user announcement handler +func NewAnnouncementHandler(announcementService *service.AnnouncementService) *AnnouncementHandler { + return &AnnouncementHandler{ + announcementService: announcementService, + } +} + +// List handles listing announcements visible to current user +// GET /api/v1/announcements +func (h *AnnouncementHandler) List(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + unreadOnly := parseBoolQuery(c.Query("unread_only")) + + items, err := h.announcementService.ListForUser(c.Request.Context(), subject.UserID, unreadOnly) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserAnnouncement, 0, len(items)) + for i := range items { + out = append(out, *dto.UserAnnouncementFromService(&items[i])) + } + response.Success(c, out) +} + +// MarkRead marks an announcement as read for current user +// POST /api/v1/announcements/:id/read 
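Taken together with List above, these two routes compose into a simple client-side acknowledge loop. The following is a hypothetical client sketch, not part of this patch; it assumes the routes are mounted as documented and that responses use the {code, data} envelope the handler tests in this patch unmarshal:

package announcements

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// announcementItem mirrors the subset of dto.UserAnnouncement needed here.
type announcementItem struct {
	ID    int64  `json:"id"`
	Title string `json:"title"`
}

// MarkAllUnreadRead lists unread announcements, then acknowledges each one
// via POST /api/v1/announcements/:id/read.
func MarkAllUnreadRead(client *http.Client, base, token string) error {
	req, err := http.NewRequest(http.MethodGet, base+"/api/v1/announcements?unread_only=true", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var envelope struct {
		Code int                `json:"code"`
		Data []announcementItem `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return err
	}
	for _, item := range envelope.Data {
		ack, err := http.NewRequest(http.MethodPost,
			fmt.Sprintf("%s/api/v1/announcements/%d/read", base, item.ID), nil)
		if err != nil {
			return err
		}
		ack.Header.Set("Authorization", "Bearer "+token)
		ackResp, err := client.Do(ack)
		if err != nil {
			return err
		}
		ackResp.Body.Close()
	}
	return nil
}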
+func (h *AnnouncementHandler) MarkRead(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + announcementID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || announcementID <= 0 { + response.BadRequest(c, "Invalid announcement ID") + return + } + + if err := h.announcementService.MarkRead(c.Request.Context(), subject.UserID, announcementID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "ok"}) +} + +func parseBoolQuery(v string) bool { + switch strings.TrimSpace(strings.ToLower(v)) { + case "1", "true", "yes", "y", "on": + return true + default: + return false + } +} diff --git a/backend/internal/handler/auth_handler.go b/backend/internal/handler/auth_handler.go index 882e4cf2..3522407d 100644 --- a/backend/internal/handler/auth_handler.go +++ b/backend/internal/handler/auth_handler.go @@ -1,6 +1,8 @@ package handler import ( + "log/slog" + "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler/dto" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" @@ -18,16 +20,18 @@ type AuthHandler struct { userService *service.UserService settingSvc *service.SettingService promoService *service.PromoService + totpService *service.TotpService } // NewAuthHandler creates a new AuthHandler -func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService) *AuthHandler { +func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService, totpService *service.TotpService) *AuthHandler { return &AuthHandler{ cfg: cfg, authService: authService, userService: userService, settingSvc: settingService, promoService: promoService, + totpService: totpService, } } @@ -144,6 +148,100 @@ func (h *AuthHandler) Login(c *gin.Context) { return } + // Check if TOTP 2FA is enabled for this user + if h.totpService != nil && h.settingSvc.IsTotpEnabled(c.Request.Context()) && user.TotpEnabled { + // Create a temporary login session for 2FA + tempToken, err := h.totpService.CreateLoginSession(c.Request.Context(), user.ID, user.Email) + if err != nil { + response.InternalError(c, "Failed to create 2FA session") + return + } + + response.Success(c, TotpLoginResponse{ + Requires2FA: true, + TempToken: tempToken, + UserEmailMasked: service.MaskEmail(user.Email), + }) + return + } + + response.Success(c, AuthResponse{ + AccessToken: token, + TokenType: "Bearer", + User: dto.UserFromService(user), + }) +} + +// TotpLoginResponse represents the response when 2FA is required +type TotpLoginResponse struct { + Requires2FA bool `json:"requires_2fa"` + TempToken string `json:"temp_token,omitempty"` + UserEmailMasked string `json:"user_email_masked,omitempty"` +} + +// Login2FARequest represents the 2FA login request +type Login2FARequest struct { + TempToken string `json:"temp_token" binding:"required"` + TotpCode string `json:"totp_code" binding:"required,len=6"` +} + +// Login2FA completes the login with 2FA verification +// POST /api/v1/auth/login/2fa +func (h *AuthHandler) Login2FA(c *gin.Context) { + var req Login2FARequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + slog.Debug("login_2fa_request", + "temp_token_len", 
len(req.TempToken), + "totp_code_len", len(req.TotpCode)) + + // Get the login session + session, err := h.totpService.GetLoginSession(c.Request.Context(), req.TempToken) + if err != nil || session == nil { + tokenPrefix := "" + if len(req.TempToken) >= 8 { + tokenPrefix = req.TempToken[:8] + } + slog.Debug("login_2fa_session_invalid", + "temp_token_prefix", tokenPrefix, + "error", err) + response.BadRequest(c, "Invalid or expired 2FA session") + return + } + + slog.Debug("login_2fa_session_found", + "user_id", session.UserID, + "email", session.Email) + + // Verify the TOTP code + if err := h.totpService.VerifyCode(c.Request.Context(), session.UserID, req.TotpCode); err != nil { + slog.Debug("login_2fa_verify_failed", + "user_id", session.UserID, + "error", err) + response.ErrorFrom(c, err) + return + } + + // Delete the login session + _ = h.totpService.DeleteLoginSession(c.Request.Context(), req.TempToken) + + // Get the user + user, err := h.userService.GetByID(c.Request.Context(), session.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Generate the JWT token + token, err := h.authService.GenerateToken(user) + if err != nil { + response.InternalError(c, "Failed to generate token") + return + } + response.Success(c, AuthResponse{ AccessToken: token, TokenType: "Bearer", @@ -195,6 +293,15 @@ type ValidatePromoCodeResponse struct { // ValidatePromoCode 验证优惠码(公开接口,注册前调用) // POST /api/v1/auth/validate-promo-code func (h *AuthHandler) ValidatePromoCode(c *gin.Context) { + // 检查优惠码功能是否启用 + if h.settingSvc != nil && !h.settingSvc.IsPromoCodeEnabled(c.Request.Context()) { + response.Success(c, ValidatePromoCodeResponse{ + Valid: false, + ErrorCode: "PROMO_CODE_DISABLED", + }) + return + } + var req ValidatePromoCodeRequest if err := c.ShouldBindJSON(&req); err != nil { response.BadRequest(c, "Invalid request: "+err.Error()) @@ -238,3 +345,85 @@ func (h *AuthHandler) ValidatePromoCode(c *gin.Context) { BonusAmount: promoCode.BonusAmount, }) } + +// ForgotPasswordRequest 忘记密码请求 +type ForgotPasswordRequest struct { + Email string `json:"email" binding:"required,email"` + TurnstileToken string `json:"turnstile_token"` +} + +// ForgotPasswordResponse 忘记密码响应 +type ForgotPasswordResponse struct { + Message string `json:"message"` +} + +// ForgotPassword 请求密码重置 +// POST /api/v1/auth/forgot-password +func (h *AuthHandler) ForgotPassword(c *gin.Context) { + var req ForgotPasswordRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Turnstile 验证 + if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil { + response.ErrorFrom(c, err) + return + } + + // Build frontend base URL from request + scheme := "https" + if c.Request.TLS == nil { + // Check X-Forwarded-Proto header (common in reverse proxy setups) + if proto := c.GetHeader("X-Forwarded-Proto"); proto != "" { + scheme = proto + } else { + scheme = "http" + } + } + frontendBaseURL := scheme + "://" + c.Request.Host + + // Request password reset (async) + // Note: This returns success even if email doesn't exist (to prevent enumeration) + if err := h.authService.RequestPasswordResetAsync(c.Request.Context(), req.Email, frontendBaseURL); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, ForgotPasswordResponse{ + Message: "If your email is registered, you will receive a password reset link shortly.", + }) +} + +// ResetPasswordRequest 重置密码请求 +type 
ResetPasswordRequest struct { + Email string `json:"email" binding:"required,email"` + Token string `json:"token" binding:"required"` + NewPassword string `json:"new_password" binding:"required,min=6"` +} + +// ResetPasswordResponse 重置密码响应 +type ResetPasswordResponse struct { + Message string `json:"message"` +} + +// ResetPassword 重置密码 +// POST /api/v1/auth/reset-password +func (h *AuthHandler) ResetPassword(c *gin.Context) { + var req ResetPasswordRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Reset password + if err := h.authService.ResetPassword(c.Request.Context(), req.Email, req.Token, req.NewPassword); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, ResetPasswordResponse{ + Message: "Your password has been reset successfully. You can now log in with your new password.", + }) +} diff --git a/backend/internal/handler/dto/announcement.go b/backend/internal/handler/dto/announcement.go new file mode 100644 index 00000000..bc0db1b2 --- /dev/null +++ b/backend/internal/handler/dto/announcement.go @@ -0,0 +1,74 @@ +package dto + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type Announcement struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + Status string `json:"status"` + + Targeting service.AnnouncementTargeting `json:"targeting"` + + StartsAt *time.Time `json:"starts_at,omitempty"` + EndsAt *time.Time `json:"ends_at,omitempty"` + + CreatedBy *int64 `json:"created_by,omitempty"` + UpdatedBy *int64 `json:"updated_by,omitempty"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type UserAnnouncement struct { + ID int64 `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + + StartsAt *time.Time `json:"starts_at,omitempty"` + EndsAt *time.Time `json:"ends_at,omitempty"` + + ReadAt *time.Time `json:"read_at,omitempty"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func AnnouncementFromService(a *service.Announcement) *Announcement { + if a == nil { + return nil + } + return &Announcement{ + ID: a.ID, + Title: a.Title, + Content: a.Content, + Status: a.Status, + Targeting: a.Targeting, + StartsAt: a.StartsAt, + EndsAt: a.EndsAt, + CreatedBy: a.CreatedBy, + UpdatedBy: a.UpdatedBy, + CreatedAt: a.CreatedAt, + UpdatedAt: a.UpdatedAt, + } +} + +func UserAnnouncementFromService(a *service.UserAnnouncement) *UserAnnouncement { + if a == nil { + return nil + } + return &UserAnnouncement{ + ID: a.Announcement.ID, + Title: a.Announcement.Title, + Content: a.Announcement.Content, + StartsAt: a.Announcement.StartsAt, + EndsAt: a.Announcement.EndsAt, + ReadAt: a.ReadAt, + CreatedAt: a.Announcement.CreatedAt, + UpdatedAt: a.Announcement.UpdatedAt, + } +} diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index e8420336..101c44c9 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -15,7 +15,6 @@ func UserFromServiceShallow(u *service.User) *User { ID: u.ID, Email: u.Email, Username: u.Username, - Notes: u.Notes, Role: u.Role, Balance: u.Balance, Concurrency: u.Concurrency, @@ -48,6 +47,22 @@ func UserFromService(u *service.User) *User { return out } +// UserFromServiceAdmin converts a service User to DTO for admin users. +// It includes notes - user-facing endpoints must not use this. 
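Every Admin* DTO added by this patch follows the same shape: embed the user-facing DTO and append the admin-only fields, so the admin JSON is a strict superset of the user JSON. A minimal standalone illustration of that embedding behavior (hypothetical types, not from this repo):

package main

import (
	"encoding/json"
	"fmt"
)

type user struct {
	ID    int64  `json:"id"`
	Email string `json:"email"`
}

// adminUser embeds user; the promoted fields are inlined during marshaling,
// so the output is the user payload plus the notes field.
type adminUser struct {
	user
	Notes string `json:"notes"`
}

func main() {
	out, _ := json.Marshal(adminUser{user: user{ID: 1, Email: "a@example.com"}, Notes: "vip"})
	fmt.Println(string(out)) // {"id":1,"email":"a@example.com","notes":"vip"}
}

The same superset relationship holds for AdminGroup, AdminRedeemCode, AdminUsageLog, and AdminUserSubscription introduced later in this patch.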
+func UserFromServiceAdmin(u *service.User) *AdminUser { + if u == nil { + return nil + } + base := UserFromService(u) + if base == nil { + return nil + } + return &AdminUser{ + User: *base, + Notes: u.Notes, + } +} + func APIKeyFromService(k *service.APIKey) *APIKey { if k == nil { return nil @@ -72,38 +87,31 @@ func GroupFromServiceShallow(g *service.Group) *Group { if g == nil { return nil } - return &Group{ - ID: g.ID, - Name: g.Name, - Description: g.Description, - Platform: g.Platform, - RateMultiplier: g.RateMultiplier, - IsExclusive: g.IsExclusive, - Status: g.Status, - SubscriptionType: g.SubscriptionType, - DailyLimitUSD: g.DailyLimitUSD, - WeeklyLimitUSD: g.WeeklyLimitUSD, - MonthlyLimitUSD: g.MonthlyLimitUSD, - ImagePrice1K: g.ImagePrice1K, - ImagePrice2K: g.ImagePrice2K, - ImagePrice4K: g.ImagePrice4K, - ClaudeCodeOnly: g.ClaudeCodeOnly, - FallbackGroupID: g.FallbackGroupID, - FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, - ModelRouting: g.ModelRouting, - ModelRoutingEnabled: g.ModelRoutingEnabled, - MCPXMLInject: g.MCPXMLInject, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, - AccountCount: g.AccountCount, - } + out := groupFromServiceBase(g) + return &out } func GroupFromService(g *service.Group) *Group { if g == nil { return nil } - out := GroupFromServiceShallow(g) + return GroupFromServiceShallow(g) +} + +// GroupFromServiceAdmin converts a service Group to DTO for admin users. +// It includes internal fields like model_routing and account_count. +func GroupFromServiceAdmin(g *service.Group) *AdminGroup { + if g == nil { + return nil + } + out := &AdminGroup{ + Group: groupFromServiceBase(g), + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, + MCPXMLInject: g.MCPXMLInject, + SupportedModelScopes: g.SupportedModelScopes, + AccountCount: g.AccountCount, + } if len(g.AccountGroups) > 0 { out.AccountGroups = make([]AccountGroup, 0, len(g.AccountGroups)) for i := range g.AccountGroups { @@ -114,6 +122,31 @@ func GroupFromService(g *service.Group) *Group { return out } +func groupFromServiceBase(g *service.Group) Group { + return Group{ + ID: g.ID, + Name: g.Name, + Description: g.Description, + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUSD, + WeeklyLimitUSD: g.WeeklyLimitUSD, + MonthlyLimitUSD: g.MonthlyLimitUSD, + ImagePrice1K: g.ImagePrice1K, + ImagePrice2K: g.ImagePrice2K, + ImagePrice4K: g.ImagePrice4K, + ClaudeCodeOnly: g.ClaudeCodeOnly, + FallbackGroupID: g.FallbackGroupID, + // 无效请求兜底分组 + FallbackGroupIDOnInvalidRequest: g.FallbackGroupIDOnInvalidRequest, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + } +} + func AccountFromServiceShallow(a *service.Account) *Account { if a == nil { return nil @@ -163,6 +196,16 @@ func AccountFromServiceShallow(a *service.Account) *Account { if idleTimeout := a.GetSessionIdleTimeoutMinutes(); idleTimeout > 0 { out.SessionIdleTimeoutMin = &idleTimeout } + // TLS指纹伪装开关 + if a.IsTLSFingerprintEnabled() { + enabled := true + out.EnableTLSFingerprint = &enabled + } + // 会话ID伪装开关 + if a.IsSessionIDMaskingEnabled() { + enabled := true + out.EnableSessionIDMasking = &enabled + } } if scopeLimits := a.GetAntigravityScopeRateLimits(); len(scopeLimits) > 0 { @@ -276,7 +319,24 @@ func RedeemCodeFromService(rc *service.RedeemCode) *RedeemCode { if rc == nil { return nil } - return &RedeemCode{ + out := redeemCodeFromServiceBase(rc) + return &out +} + +// 
RedeemCodeFromServiceAdmin converts a service RedeemCode to DTO for admin users. +// It includes notes - user-facing endpoints must not use this. +func RedeemCodeFromServiceAdmin(rc *service.RedeemCode) *AdminRedeemCode { + if rc == nil { + return nil + } + return &AdminRedeemCode{ + RedeemCode: redeemCodeFromServiceBase(rc), + Notes: rc.Notes, + } +} + +func redeemCodeFromServiceBase(rc *service.RedeemCode) RedeemCode { + out := RedeemCode{ ID: rc.ID, Code: rc.Code, Type: rc.Type, @@ -284,13 +344,20 @@ func RedeemCodeFromService(rc *service.RedeemCode) *RedeemCode { Status: rc.Status, UsedBy: rc.UsedBy, UsedAt: rc.UsedAt, - Notes: rc.Notes, CreatedAt: rc.CreatedAt, GroupID: rc.GroupID, ValidityDays: rc.ValidityDays, User: UserFromServiceShallow(rc.User), Group: GroupFromServiceShallow(rc.Group), } + + // For admin_balance/admin_concurrency types, include notes so users can see + // why they were charged or credited by admin + if (rc.Type == "admin_balance" || rc.Type == "admin_concurrency") && rc.Notes != "" { + out.Notes = &rc.Notes + } + + return out } // AccountSummaryFromService returns a minimal AccountSummary for usage log display. @@ -305,14 +372,9 @@ func AccountSummaryFromService(a *service.Account) *AccountSummary { } } -// usageLogFromServiceBase is a helper that converts service UsageLog to DTO. -// The account parameter allows caller to control what Account info is included. -// The includeIPAddress parameter controls whether to include the IP address (admin-only). -func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, includeIPAddress bool) *UsageLog { - if l == nil { - return nil - } - result := &UsageLog{ +func usageLogFromServiceUser(l *service.UsageLog) UsageLog { + // 普通用户 DTO:严禁包含管理员字段(例如 account_rate_multiplier、ip_address、account)。 + return UsageLog{ ID: l.ID, UserID: l.UserID, APIKeyID: l.APIKeyID, @@ -334,7 +396,6 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, inclu TotalCost: l.TotalCost, ActualCost: l.ActualCost, RateMultiplier: l.RateMultiplier, - AccountRateMultiplier: l.AccountRateMultiplier, BillingType: l.BillingType, Stream: l.Stream, DurationMs: l.DurationMs, @@ -345,30 +406,63 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, inclu CreatedAt: l.CreatedAt, User: UserFromServiceShallow(l.User), APIKey: APIKeyFromService(l.APIKey), - Account: account, Group: GroupFromServiceShallow(l.Group), Subscription: UserSubscriptionFromService(l.Subscription), } - // IP 地址仅对管理员可见 - if includeIPAddress { - result.IPAddress = l.IPAddress - } - return result } // UsageLogFromService converts a service UsageLog to DTO for regular users. // It excludes Account details and IP address - users should not see these. func UsageLogFromService(l *service.UsageLog) *UsageLog { - return usageLogFromServiceBase(l, nil, false) + if l == nil { + return nil + } + u := usageLogFromServiceUser(l) + return &u } // UsageLogFromServiceAdmin converts a service UsageLog to DTO for admin users. // It includes minimal Account info (ID, Name only) and IP address. 
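It also re-exposes the account_rate_multiplier snapshot, which is nullable: per the AdminUsageLog doc comment later in this patch, nil means the multiplier was 1.0 at billing time. A small defensive-read sketch of that convention (hypothetical helper, not part of the patch):

package main

import "fmt"

// effectiveAccountMultiplier applies the nil-means-1.0 convention used by
// the account_rate_multiplier snapshot.
func effectiveAccountMultiplier(m *float64) float64 {
	if m == nil {
		return 1.0
	}
	return *m
}

func main() {
	var unset *float64
	set := 1.5
	fmt.Println(effectiveAccountMultiplier(unset)) // 1
	fmt.Println(effectiveAccountMultiplier(&set))  // 1.5
}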
-func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog { +func UsageLogFromServiceAdmin(l *service.UsageLog) *AdminUsageLog { if l == nil { return nil } - return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account), true) + return &AdminUsageLog{ + UsageLog: usageLogFromServiceUser(l), + AccountRateMultiplier: l.AccountRateMultiplier, + IPAddress: l.IPAddress, + Account: AccountSummaryFromService(l.Account), + } +} + +func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTask { + if task == nil { + return nil + } + return &UsageCleanupTask{ + ID: task.ID, + Status: task.Status, + Filters: UsageCleanupFilters{ + StartTime: task.Filters.StartTime, + EndTime: task.Filters.EndTime, + UserID: task.Filters.UserID, + APIKeyID: task.Filters.APIKeyID, + AccountID: task.Filters.AccountID, + GroupID: task.Filters.GroupID, + Model: task.Filters.Model, + Stream: task.Filters.Stream, + BillingType: task.Filters.BillingType, + }, + CreatedBy: task.CreatedBy, + DeletedRows: task.DeletedRows, + ErrorMessage: task.ErrorMsg, + CanceledBy: task.CanceledBy, + CanceledAt: task.CanceledAt, + StartedAt: task.StartedAt, + FinishedAt: task.FinishedAt, + CreatedAt: task.CreatedAt, + UpdatedAt: task.UpdatedAt, + } } func SettingFromService(s *service.Setting) *Setting { @@ -387,7 +481,27 @@ func UserSubscriptionFromService(sub *service.UserSubscription) *UserSubscriptio if sub == nil { return nil } - return &UserSubscription{ + out := userSubscriptionFromServiceBase(sub) + return &out +} + +// UserSubscriptionFromServiceAdmin converts a service UserSubscription to DTO for admin users. +// It includes assignment metadata and notes. +func UserSubscriptionFromServiceAdmin(sub *service.UserSubscription) *AdminUserSubscription { + if sub == nil { + return nil + } + return &AdminUserSubscription{ + UserSubscription: userSubscriptionFromServiceBase(sub), + AssignedBy: sub.AssignedBy, + AssignedAt: sub.AssignedAt, + Notes: sub.Notes, + AssignedByUser: UserFromServiceShallow(sub.AssignedByUser), + } +} + +func userSubscriptionFromServiceBase(sub *service.UserSubscription) UserSubscription { + return UserSubscription{ ID: sub.ID, UserID: sub.UserID, GroupID: sub.GroupID, @@ -400,14 +514,10 @@ func UserSubscriptionFromService(sub *service.UserSubscription) *UserSubscriptio DailyUsageUSD: sub.DailyUsageUSD, WeeklyUsageUSD: sub.WeeklyUsageUSD, MonthlyUsageUSD: sub.MonthlyUsageUSD, - AssignedBy: sub.AssignedBy, - AssignedAt: sub.AssignedAt, - Notes: sub.Notes, CreatedAt: sub.CreatedAt, UpdatedAt: sub.UpdatedAt, User: UserFromServiceShallow(sub.User), Group: GroupFromServiceShallow(sub.Group), - AssignedByUser: UserFromServiceShallow(sub.AssignedByUser), } } @@ -415,9 +525,9 @@ func BulkAssignResultFromService(r *service.BulkAssignResult) *BulkAssignResult if r == nil { return nil } - subs := make([]UserSubscription, 0, len(r.Subscriptions)) + subs := make([]AdminUserSubscription, 0, len(r.Subscriptions)) for i := range r.Subscriptions { - subs = append(subs, *UserSubscriptionFromService(&r.Subscriptions[i])) + subs = append(subs, *UserSubscriptionFromServiceAdmin(&r.Subscriptions[i])) } return &BulkAssignResult{ SuccessCount: r.SuccessCount, diff --git a/backend/internal/handler/dto/settings.go b/backend/internal/handler/dto/settings.go index 81206def..152da756 100644 --- a/backend/internal/handler/dto/settings.go +++ b/backend/internal/handler/dto/settings.go @@ -2,8 +2,12 @@ package dto // SystemSettings represents the admin settings API response payload. 
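One convention worth noting before the struct below: the settings DTOs never echo stored secrets back to the client. Secrets surface only as *_configured booleans (LinuxDoConnectClientSecretConfigured, TotpEncryptionKeyConfigured, and so on). A minimal sketch of that pattern, with hypothetical field names:

package main

import (
	"encoding/json"
	"fmt"
)

type settingsView struct {
	SMTPHost               string `json:"smtp_host"`
	SMTPPasswordConfigured bool   `json:"smtp_password_configured"`
}

// toView reports only whether the secret is set; the value itself never
// leaves the server.
func toView(host, password string) settingsView {
	return settingsView{
		SMTPHost:               host,
		SMTPPasswordConfigured: password != "",
	}
}

func main() {
	out, _ := json.Marshal(toView("smtp.example.com", "s3cret"))
	fmt.Println(string(out)) // {"smtp_host":"smtp.example.com","smtp_password_configured":true}
}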
type SystemSettings struct { - RegistrationEnabled bool `json:"registration_enabled"` - EmailVerifyEnabled bool `json:"email_verify_enabled"` + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + PromoCodeEnabled bool `json:"promo_code_enabled"` + PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证 + TotpEncryptionKeyConfigured bool `json:"totp_encryption_key_configured"` // TOTP 加密密钥是否已配置 SMTPHost string `json:"smtp_host"` SMTPPort int `json:"smtp_port"` @@ -22,13 +26,16 @@ type SystemSettings struct { LinuxDoConnectClientSecretConfigured bool `json:"linuxdo_connect_client_secret_configured"` LinuxDoConnectRedirectURL string `json:"linuxdo_connect_redirect_url"` - SiteName string `json:"site_name"` - SiteLogo string `json:"site_logo"` - SiteSubtitle string `json:"site_subtitle"` - APIBaseURL string `json:"api_base_url"` - ContactInfo string `json:"contact_info"` - DocURL string `json:"doc_url"` - HomeContent string `json:"home_content"` + SiteName string `json:"site_name"` + SiteLogo string `json:"site_logo"` + SiteSubtitle string `json:"site_subtitle"` + APIBaseURL string `json:"api_base_url"` + ContactInfo string `json:"contact_info"` + DocURL string `json:"doc_url"` + HomeContent string `json:"home_content"` + HideCcsImportButton bool `json:"hide_ccs_import_button"` + PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"` + PurchaseSubscriptionURL string `json:"purchase_subscription_url"` DefaultConcurrency int `json:"default_concurrency"` DefaultBalance float64 `json:"default_balance"` @@ -52,19 +59,25 @@ type SystemSettings struct { } type PublicSettings struct { - RegistrationEnabled bool `json:"registration_enabled"` - EmailVerifyEnabled bool `json:"email_verify_enabled"` - TurnstileEnabled bool `json:"turnstile_enabled"` - TurnstileSiteKey string `json:"turnstile_site_key"` - SiteName string `json:"site_name"` - SiteLogo string `json:"site_logo"` - SiteSubtitle string `json:"site_subtitle"` - APIBaseURL string `json:"api_base_url"` - ContactInfo string `json:"contact_info"` - DocURL string `json:"doc_url"` - HomeContent string `json:"home_content"` - LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` - Version string `json:"version"` + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + PromoCodeEnabled bool `json:"promo_code_enabled"` + PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` // TOTP 双因素认证 + TurnstileEnabled bool `json:"turnstile_enabled"` + TurnstileSiteKey string `json:"turnstile_site_key"` + SiteName string `json:"site_name"` + SiteLogo string `json:"site_logo"` + SiteSubtitle string `json:"site_subtitle"` + APIBaseURL string `json:"api_base_url"` + ContactInfo string `json:"contact_info"` + DocURL string `json:"doc_url"` + HomeContent string `json:"home_content"` + HideCcsImportButton bool `json:"hide_ccs_import_button"` + PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"` + PurchaseSubscriptionURL string `json:"purchase_subscription_url"` + LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` + Version string `json:"version"` } // StreamTimeoutSettings 流超时处理配置 DTO diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index abb9494a..7b33e473 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -11,7 +11,6 @@ 
type User struct { ID int64 `json:"id"` Email string `json:"email"` Username string `json:"username"` - Notes string `json:"notes"` Role string `json:"role"` Balance float64 `json:"balance"` Concurrency int `json:"concurrency"` @@ -24,6 +23,14 @@ type User struct { Subscriptions []UserSubscription `json:"subscriptions,omitempty"` } +// AdminUser 是管理员接口使用的 user DTO(包含敏感/内部字段)。 +// 注意:普通用户接口不得返回 notes 等管理员备注信息。 +type AdminUser struct { + User + + Notes string `json:"notes"` +} + type APIKey struct { ID int64 `json:"id"` UserID int64 `json:"user_id"` @@ -65,6 +72,15 @@ type Group struct { // 无效请求兜底分组 FallbackGroupIDOnInvalidRequest *int64 `json:"fallback_group_id_on_invalid_request"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// AdminGroup 是管理员接口使用的 group DTO(包含敏感/内部字段)。 +// 注意:普通用户接口不得返回 model_routing/account_count/account_groups 等内部信息。 +type AdminGroup struct { + Group + // 模型路由配置(仅 anthropic 平台使用) ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` @@ -72,9 +88,6 @@ type Group struct { // MCP XML 协议注入(仅 antigravity 平台使用) MCPXMLInject bool `json:"mcp_xml_inject"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - AccountGroups []AccountGroup `json:"account_groups,omitempty"` AccountCount int64 `json:"account_count,omitempty"` } @@ -125,6 +138,15 @@ type Account struct { MaxSessions *int `json:"max_sessions,omitempty"` SessionIdleTimeoutMin *int `json:"session_idle_timeout_minutes,omitempty"` + // TLS指纹伪装(仅 Anthropic OAuth/SetupToken 账号有效) + // 从 extra 字段提取,方便前端显示和编辑 + EnableTLSFingerprint *bool `json:"enable_tls_fingerprint,omitempty"` + + // 会话ID伪装(仅 Anthropic OAuth/SetupToken 账号有效) + // 启用后将在15分钟内固定 metadata.user_id 中的 session ID + // 从 extra 字段提取,方便前端显示和编辑 + EnableSessionIDMasking *bool `json:"session_id_masking_enabled,omitempty"` + Proxy *Proxy `json:"proxy,omitempty"` AccountGroups []AccountGroup `json:"account_groups,omitempty"` @@ -184,16 +206,28 @@ type RedeemCode struct { Status string `json:"status"` UsedBy *int64 `json:"used_by"` UsedAt *time.Time `json:"used_at"` - Notes string `json:"notes"` CreatedAt time.Time `json:"created_at"` GroupID *int64 `json:"group_id"` ValidityDays int `json:"validity_days"` + // Notes is only populated for admin_balance/admin_concurrency types + // so users can see why they were charged or credited + Notes *string `json:"notes,omitempty"` + User *User `json:"user,omitempty"` Group *Group `json:"group,omitempty"` } +// AdminRedeemCode 是管理员接口使用的 redeem code DTO(包含 notes 等字段)。 +// 注意:普通用户接口不得返回 notes 等内部信息。 +type AdminRedeemCode struct { + RedeemCode + + Notes string `json:"notes"` +} + +// UsageLog 是普通用户接口使用的 usage log DTO(不包含管理员字段)。 type UsageLog struct { ID int64 `json:"id"` UserID int64 `json:"user_id"` @@ -213,14 +247,13 @@ type UsageLog struct { CacheCreation5mTokens int `json:"cache_creation_5m_tokens"` CacheCreation1hTokens int `json:"cache_creation_1h_tokens"` - InputCost float64 `json:"input_cost"` - OutputCost float64 `json:"output_cost"` - CacheCreationCost float64 `json:"cache_creation_cost"` - CacheReadCost float64 `json:"cache_read_cost"` - TotalCost float64 `json:"total_cost"` - ActualCost float64 `json:"actual_cost"` - RateMultiplier float64 `json:"rate_multiplier"` - AccountRateMultiplier *float64 `json:"account_rate_multiplier"` + InputCost float64 `json:"input_cost"` + OutputCost float64 `json:"output_cost"` + CacheCreationCost float64 `json:"cache_creation_cost"` + CacheReadCost float64 
`json:"cache_read_cost"` + TotalCost float64 `json:"total_cost"` + ActualCost float64 `json:"actual_cost"` + RateMultiplier float64 `json:"rate_multiplier"` BillingType int8 `json:"billing_type"` Stream bool `json:"stream"` @@ -234,18 +267,55 @@ type UsageLog struct { // User-Agent UserAgent *string `json:"user_agent"` - // IP 地址(仅管理员可见) - IPAddress *string `json:"ip_address,omitempty"` - CreatedAt time.Time `json:"created_at"` User *User `json:"user,omitempty"` APIKey *APIKey `json:"api_key,omitempty"` - Account *AccountSummary `json:"account,omitempty"` // Use minimal AccountSummary to prevent data leakage Group *Group `json:"group,omitempty"` Subscription *UserSubscription `json:"subscription,omitempty"` } +// AdminUsageLog 是管理员接口使用的 usage log DTO(包含管理员字段)。 +type AdminUsageLog struct { + UsageLog + + // AccountRateMultiplier 账号计费倍率快照(nil 表示按 1.0 处理) + AccountRateMultiplier *float64 `json:"account_rate_multiplier"` + + // IPAddress 用户请求 IP(仅管理员可见) + IPAddress *string `json:"ip_address,omitempty"` + + // Account 最小账号信息(避免泄露敏感字段) + Account *AccountSummary `json:"account,omitempty"` +} + +type UsageCleanupFilters struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + Model *string `json:"model,omitempty"` + Stream *bool `json:"stream,omitempty"` + BillingType *int8 `json:"billing_type,omitempty"` +} + +type UsageCleanupTask struct { + ID int64 `json:"id"` + Status string `json:"status"` + Filters UsageCleanupFilters `json:"filters"` + CreatedBy int64 `json:"created_by"` + DeletedRows int64 `json:"deleted_rows"` + ErrorMessage *string `json:"error_message,omitempty"` + CanceledBy *int64 `json:"canceled_by,omitempty"` + CanceledAt *time.Time `json:"canceled_at,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + FinishedAt *time.Time `json:"finished_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + // AccountSummary is a minimal account info for usage log display. // It intentionally excludes sensitive fields like Credentials, Proxy, etc. 
type AccountSummary struct { @@ -277,23 +347,30 @@ type UserSubscription struct { WeeklyUsageUSD float64 `json:"weekly_usage_usd"` MonthlyUsageUSD float64 `json:"monthly_usage_usd"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + + User *User `json:"user,omitempty"` + Group *Group `json:"group,omitempty"` +} + +// AdminUserSubscription 是管理员接口使用的订阅 DTO(包含分配信息/备注等字段)。 +// 注意:普通用户接口不得返回 assigned_by/assigned_at/notes/assigned_by_user 等管理员字段。 +type AdminUserSubscription struct { + UserSubscription + AssignedBy *int64 `json:"assigned_by"` AssignedAt time.Time `json:"assigned_at"` Notes string `json:"notes"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - - User *User `json:"user,omitempty"` - Group *Group `json:"group,omitempty"` - AssignedByUser *User `json:"assigned_by_user,omitempty"` + AssignedByUser *User `json:"assigned_by_user,omitempty"` } type BulkAssignResult struct { - SuccessCount int `json:"success_count"` - FailedCount int `json:"failed_count"` - Subscriptions []UserSubscription `json:"subscriptions"` - Errors []string `json:"errors"` + SuccessCount int `json:"success_count"` + FailedCount int `json:"failed_count"` + Subscriptions []AdminUserSubscription `json:"subscriptions"` + Errors []string `json:"errors"` } // PromoCode 注册优惠码 diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index fdb6411c..86564db3 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -31,6 +31,7 @@ type GatewayHandler struct { antigravityGatewayService *service.AntigravityGatewayService userService *service.UserService billingCacheService *service.BillingCacheService + usageService *service.UsageService concurrencyHelper *ConcurrencyHelper maxAccountSwitches int maxAccountSwitchesGemini int @@ -44,6 +45,7 @@ func NewGatewayHandler( userService *service.UserService, concurrencyService *service.ConcurrencyService, billingCacheService *service.BillingCacheService, + usageService *service.UsageService, cfg *config.Config, ) *GatewayHandler { pingInterval := time.Duration(0) @@ -64,6 +66,7 @@ func NewGatewayHandler( antigravityGatewayService: antigravityGatewayService, userService: userService, billingCacheService: billingCacheService, + usageService: usageService, concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval), maxAccountSwitches: maxAccountSwitches, maxAccountSwitchesGemini: maxAccountSwitchesGemini, @@ -210,17 +213,20 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) - // 检查预热请求拦截(在账号选择后、转发前检查) - if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() + // 检查请求拦截(预热请求、SUGGESTION MODE等) + if account.IsInterceptWarmupEnabled() { + interceptType := detectInterceptType(body) + if interceptType != InterceptTypeNone { + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + if reqStream { + sendMockInterceptStream(c, reqModel, interceptType) + } else { + sendMockInterceptResponse(c, reqModel, interceptType) + } + return } - if reqStream { - sendMockWarmupStream(c, reqModel) - } else { - sendMockWarmupResponse(c, reqModel) - } - return } // 3. 
获取账号并发槽位 @@ -359,17 +365,20 @@ func (h *GatewayHandler) Messages(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) - // 检查预热请求拦截(在账号选择后、转发前检查) - if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() + // 检查请求拦截(预热请求、SUGGESTION MODE等) + if account.IsInterceptWarmupEnabled() { + interceptType := detectInterceptType(body) + if interceptType != InterceptTypeNone { + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } + if reqStream { + sendMockInterceptStream(c, reqModel, interceptType) + } else { + sendMockInterceptResponse(c, reqModel, interceptType) + } + return } - if reqStream { - sendMockWarmupStream(c, reqModel) - } else { - sendMockWarmupResponse(c, reqModel) - } - return } // 3. 获取账号并发槽位 @@ -588,7 +597,7 @@ func cloneAPIKeyWithGroup(apiKey *service.APIKey, group *service.Group) *service return &cloned } -// Usage handles getting account balance for CC Switch integration +// Usage handles getting account balance and usage statistics for CC Switch integration // GET /v1/usage func (h *GatewayHandler) Usage(c *gin.Context) { apiKey, ok := middleware2.GetAPIKeyFromContext(c) @@ -603,7 +612,40 @@ func (h *GatewayHandler) Usage(c *gin.Context) { return } - // 订阅模式:返回订阅限额信息 + // Best-effort: 获取用量统计,失败不影响基础响应 + var usageData gin.H + if h.usageService != nil { + dashStats, err := h.usageService.GetUserDashboardStats(c.Request.Context(), subject.UserID) + if err == nil && dashStats != nil { + usageData = gin.H{ + "today": gin.H{ + "requests": dashStats.TodayRequests, + "input_tokens": dashStats.TodayInputTokens, + "output_tokens": dashStats.TodayOutputTokens, + "cache_creation_tokens": dashStats.TodayCacheCreationTokens, + "cache_read_tokens": dashStats.TodayCacheReadTokens, + "total_tokens": dashStats.TodayTokens, + "cost": dashStats.TodayCost, + "actual_cost": dashStats.TodayActualCost, + }, + "total": gin.H{ + "requests": dashStats.TotalRequests, + "input_tokens": dashStats.TotalInputTokens, + "output_tokens": dashStats.TotalOutputTokens, + "cache_creation_tokens": dashStats.TotalCacheCreationTokens, + "cache_read_tokens": dashStats.TotalCacheReadTokens, + "total_tokens": dashStats.TotalTokens, + "cost": dashStats.TotalCost, + "actual_cost": dashStats.TotalActualCost, + }, + "average_duration_ms": dashStats.AverageDurationMs, + "rpm": dashStats.Rpm, + "tpm": dashStats.Tpm, + } + } + } + + // 订阅模式:返回订阅限额信息 + 用量统计 if apiKey.Group != nil && apiKey.Group.IsSubscriptionType() { subscription, ok := middleware2.GetSubscriptionFromContext(c) if !ok { @@ -612,28 +654,46 @@ func (h *GatewayHandler) Usage(c *gin.Context) { } remaining := h.calculateSubscriptionRemaining(apiKey.Group, subscription) - c.JSON(http.StatusOK, gin.H{ + resp := gin.H{ "isValid": true, "planName": apiKey.Group.Name, "remaining": remaining, "unit": "USD", - }) + "subscription": gin.H{ + "daily_usage_usd": subscription.DailyUsageUSD, + "weekly_usage_usd": subscription.WeeklyUsageUSD, + "monthly_usage_usd": subscription.MonthlyUsageUSD, + "daily_limit_usd": apiKey.Group.DailyLimitUSD, + "weekly_limit_usd": apiKey.Group.WeeklyLimitUSD, + "monthly_limit_usd": apiKey.Group.MonthlyLimitUSD, + "expires_at": subscription.ExpiresAt, + }, + } + if usageData != nil { + resp["usage"] = usageData + } + c.JSON(http.StatusOK, resp) return } - // 余额模式:返回钱包余额 + // 余额模式:返回钱包余额 + 用量统计 latestUser, err := h.userService.GetByID(c.Request.Context(), subject.UserID) if err != nil { 
h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to get user info") return } - c.JSON(http.StatusOK, gin.H{ + resp := gin.H{ "isValid": true, "planName": "钱包余额", "remaining": latestUser.Balance, "unit": "USD", - }) + "balance": latestUser.Balance, + } + if usageData != nil { + resp["usage"] = usageData + } + c.JSON(http.StatusOK, resp) } // calculateSubscriptionRemaining 计算订阅剩余可用额度 @@ -835,17 +895,30 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { } } -// isWarmupRequest 检测是否为预热请求(标题生成、Warmup等) -func isWarmupRequest(body []byte) bool { - // 快速检查:如果body不包含关键字,直接返回false +// InterceptType 表示请求拦截类型 +type InterceptType int + +const ( + InterceptTypeNone InterceptType = iota + InterceptTypeWarmup // 预热请求(返回 "New Conversation") + InterceptTypeSuggestionMode // SUGGESTION MODE(返回空字符串) +) + +// detectInterceptType 检测请求是否需要拦截,返回拦截类型 +func detectInterceptType(body []byte) InterceptType { + // 快速检查:如果不包含任何关键字,直接返回 bodyStr := string(body) - if !strings.Contains(bodyStr, "title") && !strings.Contains(bodyStr, "Warmup") { - return false + hasSuggestionMode := strings.Contains(bodyStr, "[SUGGESTION MODE:") + hasWarmupKeyword := strings.Contains(bodyStr, "title") || strings.Contains(bodyStr, "Warmup") + + if !hasSuggestionMode && !hasWarmupKeyword { + return InterceptTypeNone } - // 解析完整请求 + // 解析请求(只解析一次) var req struct { Messages []struct { + Role string `json:"role"` Content []struct { Type string `json:"type"` Text string `json:"text"` @@ -856,43 +929,71 @@ func isWarmupRequest(body []byte) bool { } `json:"system"` } if err := json.Unmarshal(body, &req); err != nil { - return false + return InterceptTypeNone } - // 检查 messages 中的标题提示模式 - for _, msg := range req.Messages { - for _, content := range msg.Content { - if content.Type == "text" { - if strings.Contains(content.Text, "Please write a 5-10 word title for the following conversation:") || - content.Text == "Warmup" { - return true + // 检查 SUGGESTION MODE(最后一条 user 消息) + if hasSuggestionMode && len(req.Messages) > 0 { + lastMsg := req.Messages[len(req.Messages)-1] + if lastMsg.Role == "user" && len(lastMsg.Content) > 0 && + lastMsg.Content[0].Type == "text" && + strings.HasPrefix(lastMsg.Content[0].Text, "[SUGGESTION MODE:") { + return InterceptTypeSuggestionMode + } + } + + // 检查 Warmup 请求 + if hasWarmupKeyword { + // 检查 messages 中的标题提示模式 + for _, msg := range req.Messages { + for _, content := range msg.Content { + if content.Type == "text" { + if strings.Contains(content.Text, "Please write a 5-10 word title for the following conversation:") || + content.Text == "Warmup" { + return InterceptTypeWarmup + } } } } + // 检查 system 中的标题提取模式 + for _, sys := range req.System { + if strings.Contains(sys.Text, "nalyze if this message indicates a new conversation topic. If it does, extract a 2-3 word title") { + return InterceptTypeWarmup + } + } } - // 检查 system 中的标题提取模式 - for _, system := range req.System { - if strings.Contains(system.Text, "nalyze if this message indicates a new conversation topic. 
If it does, extract a 2-3 word title") { - return true - } - } - - return false + return InterceptTypeNone } -// sendMockWarmupStream 发送流式 mock 响应(用于预热请求拦截) -func sendMockWarmupStream(c *gin.Context, model string) { +// sendMockInterceptStream 发送流式 mock 响应(用于请求拦截) +func sendMockInterceptStream(c *gin.Context, model string, interceptType InterceptType) { c.Header("Content-Type", "text/event-stream") c.Header("Cache-Control", "no-cache") c.Header("Connection", "keep-alive") c.Header("X-Accel-Buffering", "no") + // 根据拦截类型决定响应内容 + var msgID string + var outputTokens int + var textDeltas []string + + switch interceptType { + case InterceptTypeSuggestionMode: + msgID = "msg_mock_suggestion" + outputTokens = 1 + textDeltas = []string{""} // 空内容 + default: // InterceptTypeWarmup + msgID = "msg_mock_warmup" + outputTokens = 2 + textDeltas = []string{"New", " Conversation"} + } + // Build message_start event with proper JSON marshaling messageStart := map[string]any{ "type": "message_start", "message": map[string]any{ - "id": "msg_mock_warmup", + "id": msgID, "type": "message", "role": "assistant", "model": model, @@ -907,16 +1008,46 @@ func sendMockWarmupStream(c *gin.Context, model string) { } messageStartJSON, _ := json.Marshal(messageStart) + // Build events events := []string{ `event: message_start` + "\n" + `data: ` + string(messageStartJSON), `event: content_block_start` + "\n" + `data: {"content_block":{"text":"","type":"text"},"index":0,"type":"content_block_start"}`, - `event: content_block_delta` + "\n" + `data: {"delta":{"text":"New","type":"text_delta"},"index":0,"type":"content_block_delta"}`, - `event: content_block_delta` + "\n" + `data: {"delta":{"text":" Conversation","type":"text_delta"},"index":0,"type":"content_block_delta"}`, - `event: content_block_stop` + "\n" + `data: {"index":0,"type":"content_block_stop"}`, - `event: message_delta` + "\n" + `data: {"delta":{"stop_reason":"end_turn","stop_sequence":null},"type":"message_delta","usage":{"input_tokens":10,"output_tokens":2}}`, - `event: message_stop` + "\n" + `data: {"type":"message_stop"}`, } + // Add text deltas + for _, text := range textDeltas { + delta := map[string]any{ + "type": "content_block_delta", + "index": 0, + "delta": map[string]string{ + "type": "text_delta", + "text": text, + }, + } + deltaJSON, _ := json.Marshal(delta) + events = append(events, `event: content_block_delta`+"\n"+`data: `+string(deltaJSON)) + } + + // Add final events + messageDelta := map[string]any{ + "type": "message_delta", + "delta": map[string]any{ + "stop_reason": "end_turn", + "stop_sequence": nil, + }, + "usage": map[string]int{ + "input_tokens": 10, + "output_tokens": outputTokens, + }, + } + messageDeltaJSON, _ := json.Marshal(messageDelta) + + events = append(events, + `event: content_block_stop`+"\n"+`data: {"index":0,"type":"content_block_stop"}`, + `event: message_delta`+"\n"+`data: `+string(messageDeltaJSON), + `event: message_stop`+"\n"+`data: {"type":"message_stop"}`, + ) + for _, event := range events { _, _ = c.Writer.WriteString(event + "\n\n") c.Writer.Flush() @@ -924,18 +1055,32 @@ func sendMockWarmupStream(c *gin.Context, model string) { } } -// sendMockWarmupResponse 发送非流式 mock 响应(用于预热请求拦截) -func sendMockWarmupResponse(c *gin.Context, model string) { +// sendMockInterceptResponse 发送非流式 mock 响应(用于请求拦截) +func sendMockInterceptResponse(c *gin.Context, model string, interceptType InterceptType) { + var msgID, text string + var outputTokens int + + switch interceptType { + case InterceptTypeSuggestionMode: + msgID = 
"msg_mock_suggestion" + text = "" + outputTokens = 1 + default: // InterceptTypeWarmup + msgID = "msg_mock_warmup" + text = "New Conversation" + outputTokens = 2 + } + c.JSON(http.StatusOK, gin.H{ - "id": "msg_mock_warmup", + "id": msgID, "type": "message", "role": "assistant", "model": model, - "content": []gin.H{{"type": "text", "text": "New Conversation"}}, + "content": []gin.H{{"type": "text", "text": text}}, "stop_reason": "end_turn", "usage": gin.H{ "input_tokens": 10, - "output_tokens": 2, + "output_tokens": outputTokens, }, }) } diff --git a/backend/internal/handler/gemini_cli_session_test.go b/backend/internal/handler/gemini_cli_session_test.go new file mode 100644 index 00000000..0b37f5f2 --- /dev/null +++ b/backend/internal/handler/gemini_cli_session_test.go @@ -0,0 +1,122 @@ +//go:build unit + +package handler + +import ( + "crypto/sha256" + "encoding/hex" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestExtractGeminiCLISessionHash(t *testing.T) { + tests := []struct { + name string + body string + privilegedUserID string + wantEmpty bool + wantHash string + }{ + { + name: "with privileged-user-id and tmp dir", + body: `{"contents":[{"parts":[{"text":"The project's temporary directory is: /Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740"}]}]}`, + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: false, + wantHash: func() string { + combined := "90785f52-8bbe-4b17-b111-a1ddea1636c3:f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740" + hash := sha256.Sum256([]byte(combined)) + return hex.EncodeToString(hash[:]) + }(), + }, + { + name: "without privileged-user-id but with tmp dir", + body: `{"contents":[{"parts":[{"text":"The project's temporary directory is: /Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740"}]}]}`, + privilegedUserID: "", + wantEmpty: false, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "without tmp dir", + body: `{"contents":[{"parts":[{"text":"Hello world"}]}]}`, + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: true, + }, + { + name: "empty body", + body: "", + privilegedUserID: "90785f52-8bbe-4b17-b111-a1ddea1636c3", + wantEmpty: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 创建测试上下文 + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest("POST", "/test", nil) + if tt.privilegedUserID != "" { + c.Request.Header.Set("x-gemini-api-privileged-user-id", tt.privilegedUserID) + } + + // 调用函数 + result := extractGeminiCLISessionHash(c, []byte(tt.body)) + + // 验证结果 + if tt.wantEmpty { + require.Empty(t, result, "expected empty session hash") + } else { + require.NotEmpty(t, result, "expected non-empty session hash") + require.Equal(t, tt.wantHash, result, "session hash mismatch") + } + }) + } +} + +func TestGeminiCLITmpDirRegex(t *testing.T) { + tests := []struct { + name string + input string + wantMatch bool + wantHash string + }{ + { + name: "valid tmp dir path", + input: "/Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + wantMatch: true, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "valid tmp dir path in text", + input: "The project's temporary directory is: 
/Users/ianshaw/.gemini/tmp/f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740\nOther text", + wantMatch: true, + wantHash: "f7851b009ed314d1baee62e83115f486160283f4a55a582d89fdac8b9fe3b740", + }, + { + name: "invalid hash length", + input: "/Users/ianshaw/.gemini/tmp/abc123", + wantMatch: false, + }, + { + name: "no tmp dir", + input: "Hello world", + wantMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + match := geminiCLITmpDirRegex.FindStringSubmatch(tt.input) + if tt.wantMatch { + require.NotNil(t, match, "expected regex to match") + require.Len(t, match, 2, "expected 2 capture groups") + require.Equal(t, tt.wantHash, match[1], "hash mismatch") + } else { + require.Nil(t, match, "expected regex not to match") + } + }) + } +} diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 1946aeb2..53431dc3 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -1,11 +1,15 @@ package handler import ( + "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "io" "log" "net/http" + "regexp" "strings" "time" @@ -20,6 +24,17 @@ import ( "github.com/gin-gonic/gin" ) +// geminiCLITmpDirRegex 用于从 Gemini CLI 请求体中提取 tmp 目录的哈希值 +// 匹配格式: /Users/xxx/.gemini/tmp/[64位十六进制哈希] +var geminiCLITmpDirRegex = regexp.MustCompile(`/\.gemini/tmp/([A-Fa-f0-9]{64})`) + +func isGeminiCLIRequest(c *gin.Context, body []byte) bool { + if strings.TrimSpace(c.GetHeader("x-gemini-api-privileged-user-id")) != "" { + return true + } + return geminiCLITmpDirRegex.Match(body) +} + // GeminiV1BetaListModels proxies: // GET /v1beta/models func (h *GatewayHandler) GeminiV1BetaListModels(c *gin.Context) { @@ -215,12 +230,26 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { } // 3) select account (sticky session based on request body) - parsedReq, _ := service.ParseGatewayRequest(body) - sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) + // 优先使用 Gemini CLI 的会话标识(privileged-user-id + tmp 目录哈希) + sessionHash := extractGeminiCLISessionHash(c, body) + if sessionHash == "" { + // Fallback: 使用通用的会话哈希生成逻辑(适用于其他客户端) + parsedReq, _ := service.ParseGatewayRequest(body) + sessionHash = h.gatewayService.GenerateSessionHash(parsedReq) + } sessionKey := sessionHash if sessionHash != "" { sessionKey = "gemini:" + sessionHash } + + // 查询粘性会话绑定的账号 ID(用于检测账号切换) + var sessionBoundAccountID int64 + if sessionKey != "" { + sessionBoundAccountID, _ = h.gatewayService.GetCachedSessionAccountID(c.Request.Context(), apiKey.GroupID, sessionKey) + } + isCLI := isGeminiCLIRequest(c, body) + cleanedForUnknownBinding := false + maxAccountSwitches := h.maxAccountSwitchesGemini switchCount := 0 failedAccountIDs := make(map[int64]struct{}) @@ -239,6 +268,24 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { account := selection.Account setOpsSelectedAccount(c, account.ID) + // 检测账号切换:如果粘性会话绑定的账号与当前选择的账号不同,清除 thoughtSignature + // 注意:Gemini 原生 API 的 thoughtSignature 与具体上游账号强相关;跨账号透传会导致 400。 + if sessionBoundAccountID > 0 && sessionBoundAccountID != account.ID { + log.Printf("[Gemini] Sticky session account switched: %d -> %d, cleaning thoughtSignature", sessionBoundAccountID, account.ID) + body = service.CleanGeminiNativeThoughtSignatures(body) + sessionBoundAccountID = account.ID + } else if sessionKey != "" && sessionBoundAccountID == 0 && isCLI && !cleanedForUnknownBinding && bytes.Contains(body, []byte(`"thoughtSignature"`)) { + 
// 无缓存绑定但请求里已有 thoughtSignature:常见于缓存丢失/TTL 过期后,CLI 继续携带旧签名。 + // 为避免第一次转发就 400,这里做一次确定性清理,让新账号重新生成签名链路。 + log.Printf("[Gemini] Sticky session binding missing for CLI request, cleaning thoughtSignature proactively") + body = service.CleanGeminiNativeThoughtSignatures(body) + cleanedForUnknownBinding = true + sessionBoundAccountID = account.ID + } else if sessionBoundAccountID == 0 { + // 记录本次请求中首次选择到的账号,便于同一请求内 failover 时检测切换。 + sessionBoundAccountID = account.ID + } + // 4) account concurrency slot accountReleaseFunc := selection.ReleaseFunc if !selection.Acquired { @@ -438,3 +485,38 @@ func shouldFallbackGeminiModels(res *service.UpstreamHTTPResult) bool { } return false } + +// extractGeminiCLISessionHash 从 Gemini CLI 请求中提取会话标识。 +// 组合 x-gemini-api-privileged-user-id header 和请求体中的 tmp 目录哈希。 +// +// 会话标识生成策略: +// 1. 从请求体中提取 tmp 目录哈希(64位十六进制) +// 2. 从 header 中提取 privileged-user-id(UUID) +// 3. 组合两者生成 SHA256 哈希作为最终的会话标识 +// +// 如果找不到 tmp 目录哈希,返回空字符串(不使用粘性会话)。 +// +// extractGeminiCLISessionHash extracts session identifier from Gemini CLI requests. +// Combines x-gemini-api-privileged-user-id header with tmp directory hash from request body. +func extractGeminiCLISessionHash(c *gin.Context, body []byte) string { + // 1. 从请求体中提取 tmp 目录哈希 + match := geminiCLITmpDirRegex.FindSubmatch(body) + if len(match) < 2 { + return "" // 没有找到 tmp 目录,不使用粘性会话 + } + tmpDirHash := string(match[1]) + + // 2. 提取 privileged-user-id + privilegedUserID := strings.TrimSpace(c.GetHeader("x-gemini-api-privileged-user-id")) + + // 3. 组合生成最终的 session hash + if privilegedUserID != "" { + // 组合两个标识符:privileged-user-id + tmp 目录哈希 + combined := privilegedUserID + ":" + tmpDirHash + hash := sha256.Sum256([]byte(combined)) + return hex.EncodeToString(hash[:]) + } + + // 如果没有 privileged-user-id,直接使用 tmp 目录哈希 + return tmpDirHash +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 5b1b317d..b8f7d417 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -10,6 +10,7 @@ type AdminHandlers struct { User *admin.UserHandler Group *admin.GroupHandler Account *admin.AccountHandler + Announcement *admin.AnnouncementHandler OAuth *admin.OAuthHandler OpenAIOAuth *admin.OpenAIOAuthHandler GeminiOAuth *admin.GeminiOAuthHandler @@ -33,10 +34,12 @@ type Handlers struct { Usage *UsageHandler Redeem *RedeemHandler Subscription *SubscriptionHandler + Announcement *AnnouncementHandler Admin *AdminHandlers Gateway *GatewayHandler OpenAIGateway *OpenAIGatewayHandler Setting *SettingHandler + Totp *TotpHandler } // BuildInfo contains build-time information diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 064473a0..4c9dd8b9 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -192,8 +192,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } - // Generate session hash (from header for OpenAI) - sessionHash := h.gatewayService.GenerateSessionHash(c) + // Generate session hash (header first; fallback to prompt_cache_key) + sessionHash := h.gatewayService.GenerateSessionHash(c, reqBody) maxAccountSwitches := h.maxAccountSwitches switchCount := 0 diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go index f62e6b3e..36ffde63 100644 --- a/backend/internal/handler/ops_error_logger.go +++ b/backend/internal/handler/ops_error_logger.go @@ -905,7 +905,7 @@ func 
classifyOpsIsRetryable(errType string, statusCode int) bool { func classifyOpsIsBusinessLimited(errType, phase, code string, status int, message string) bool { switch strings.TrimSpace(code) { - case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID", "USER_INACTIVE": return true } if phase == "billing" || phase == "concurrency" { @@ -1011,5 +1011,12 @@ func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message } } + // Check if invalid/missing API key errors should be ignored (user misconfiguration) + if settings.IgnoreInvalidApiKeyErrors { + if strings.Contains(bodyLower, "invalid_api_key") || strings.Contains(bodyLower, "api_key_required") { + return true + } + } + return false } diff --git a/backend/internal/handler/setting_handler.go b/backend/internal/handler/setting_handler.go index cac79e9c..9fd27dc3 100644 --- a/backend/internal/handler/setting_handler.go +++ b/backend/internal/handler/setting_handler.go @@ -32,18 +32,24 @@ func (h *SettingHandler) GetPublicSettings(c *gin.Context) { } response.Success(c, dto.PublicSettings{ - RegistrationEnabled: settings.RegistrationEnabled, - EmailVerifyEnabled: settings.EmailVerifyEnabled, - TurnstileEnabled: settings.TurnstileEnabled, - TurnstileSiteKey: settings.TurnstileSiteKey, - SiteName: settings.SiteName, - SiteLogo: settings.SiteLogo, - SiteSubtitle: settings.SiteSubtitle, - APIBaseURL: settings.APIBaseURL, - ContactInfo: settings.ContactInfo, - DocURL: settings.DocURL, - HomeContent: settings.HomeContent, - LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, - Version: h.version, + RegistrationEnabled: settings.RegistrationEnabled, + EmailVerifyEnabled: settings.EmailVerifyEnabled, + PromoCodeEnabled: settings.PromoCodeEnabled, + PasswordResetEnabled: settings.PasswordResetEnabled, + TotpEnabled: settings.TotpEnabled, + TurnstileEnabled: settings.TurnstileEnabled, + TurnstileSiteKey: settings.TurnstileSiteKey, + SiteName: settings.SiteName, + SiteLogo: settings.SiteLogo, + SiteSubtitle: settings.SiteSubtitle, + APIBaseURL: settings.APIBaseURL, + ContactInfo: settings.ContactInfo, + DocURL: settings.DocURL, + HomeContent: settings.HomeContent, + HideCcsImportButton: settings.HideCcsImportButton, + PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled, + PurchaseSubscriptionURL: settings.PurchaseSubscriptionURL, + LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, + Version: h.version, }) } diff --git a/backend/internal/handler/totp_handler.go b/backend/internal/handler/totp_handler.go new file mode 100644 index 00000000..5c5eb567 --- /dev/null +++ b/backend/internal/handler/totp_handler.go @@ -0,0 +1,181 @@ +package handler + +import ( + "github.com/gin-gonic/gin" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// TotpHandler handles TOTP-related requests +type TotpHandler struct { + totpService *service.TotpService +} + +// NewTotpHandler creates a new TotpHandler +func NewTotpHandler(totpService *service.TotpService) *TotpHandler { + return &TotpHandler{ + totpService: totpService, + } +} + +// TotpStatusResponse represents the TOTP status response +type TotpStatusResponse struct { + Enabled bool `json:"enabled"` + EnabledAt *int64 `json:"enabled_at,omitempty"` // Unix timestamp + FeatureEnabled bool 
`json:"feature_enabled"` +} + +// GetStatus returns the TOTP status for the current user +// GET /api/v1/user/totp/status +func (h *TotpHandler) GetStatus(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + status, err := h.totpService.GetStatus(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + resp := TotpStatusResponse{ + Enabled: status.Enabled, + FeatureEnabled: status.FeatureEnabled, + } + + if status.EnabledAt != nil { + ts := status.EnabledAt.Unix() + resp.EnabledAt = &ts + } + + response.Success(c, resp) +} + +// TotpSetupRequest represents the request to initiate TOTP setup +type TotpSetupRequest struct { + EmailCode string `json:"email_code"` + Password string `json:"password"` +} + +// TotpSetupResponse represents the TOTP setup response +type TotpSetupResponse struct { + Secret string `json:"secret"` + QRCodeURL string `json:"qr_code_url"` + SetupToken string `json:"setup_token"` + Countdown int `json:"countdown"` +} + +// InitiateSetup starts the TOTP setup process +// POST /api/v1/user/totp/setup +func (h *TotpHandler) InitiateSetup(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpSetupRequest + if err := c.ShouldBindJSON(&req); err != nil { + // Allow empty body (optional params) + req = TotpSetupRequest{} + } + + result, err := h.totpService.InitiateSetup(c.Request.Context(), subject.UserID, req.EmailCode, req.Password) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, TotpSetupResponse{ + Secret: result.Secret, + QRCodeURL: result.QRCodeURL, + SetupToken: result.SetupToken, + Countdown: result.Countdown, + }) +} + +// TotpEnableRequest represents the request to enable TOTP +type TotpEnableRequest struct { + TotpCode string `json:"totp_code" binding:"required,len=6"` + SetupToken string `json:"setup_token" binding:"required"` +} + +// Enable completes the TOTP setup +// POST /api/v1/user/totp/enable +func (h *TotpHandler) Enable(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpEnableRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if err := h.totpService.CompleteSetup(c.Request.Context(), subject.UserID, req.TotpCode, req.SetupToken); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} + +// TotpDisableRequest represents the request to disable TOTP +type TotpDisableRequest struct { + EmailCode string `json:"email_code"` + Password string `json:"password"` +} + +// Disable disables TOTP for the current user +// POST /api/v1/user/totp/disable +func (h *TotpHandler) Disable(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req TotpDisableRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if err := h.totpService.Disable(c.Request.Context(), subject.UserID, req.EmailCode, req.Password); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} + +// GetVerificationMethod returns the 
verification method for TOTP operations +// GET /api/v1/user/totp/verification-method +func (h *TotpHandler) GetVerificationMethod(c *gin.Context) { + method := h.totpService.GetVerificationMethod(c.Request.Context()) + response.Success(c, method) +} + +// SendVerifyCode sends an email verification code for TOTP operations +// POST /api/v1/user/totp/send-code +func (h *TotpHandler) SendVerifyCode(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + if err := h.totpService.SendVerifyCode(c.Request.Context(), subject.UserID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"success": true}) +} diff --git a/backend/internal/handler/user_handler.go b/backend/internal/handler/user_handler.go index d968951c..35862f1c 100644 --- a/backend/internal/handler/user_handler.go +++ b/backend/internal/handler/user_handler.go @@ -47,9 +47,6 @@ func (h *UserHandler) GetProfile(c *gin.Context) { return } - // 清空notes字段,普通用户不应看到备注 - userData.Notes = "" - response.Success(c, dto.UserFromService(userData)) } @@ -105,8 +102,5 @@ func (h *UserHandler) UpdateProfile(c *gin.Context) { return } - // 清空notes字段,普通用户不应看到备注 - updatedUser.Notes = "" - response.Success(c, dto.UserFromService(updatedUser)) } diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 2af7905e..48a3794b 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -13,6 +13,7 @@ func ProvideAdminHandlers( userHandler *admin.UserHandler, groupHandler *admin.GroupHandler, accountHandler *admin.AccountHandler, + announcementHandler *admin.AnnouncementHandler, oauthHandler *admin.OAuthHandler, openaiOAuthHandler *admin.OpenAIOAuthHandler, geminiOAuthHandler *admin.GeminiOAuthHandler, @@ -32,6 +33,7 @@ func ProvideAdminHandlers( User: userHandler, Group: groupHandler, Account: accountHandler, + Announcement: announcementHandler, OAuth: oauthHandler, OpenAIOAuth: openaiOAuthHandler, GeminiOAuth: geminiOAuthHandler, @@ -66,10 +68,12 @@ func ProvideHandlers( usageHandler *UsageHandler, redeemHandler *RedeemHandler, subscriptionHandler *SubscriptionHandler, + announcementHandler *AnnouncementHandler, adminHandlers *AdminHandlers, gatewayHandler *GatewayHandler, openaiGatewayHandler *OpenAIGatewayHandler, settingHandler *SettingHandler, + totpHandler *TotpHandler, ) *Handlers { return &Handlers{ Auth: authHandler, @@ -78,10 +82,12 @@ func ProvideHandlers( Usage: usageHandler, Redeem: redeemHandler, Subscription: subscriptionHandler, + Announcement: announcementHandler, Admin: adminHandlers, Gateway: gatewayHandler, OpenAIGateway: openaiGatewayHandler, Setting: settingHandler, + Totp: totpHandler, } } @@ -94,8 +100,10 @@ var ProviderSet = wire.NewSet( NewUsageHandler, NewRedeemHandler, NewSubscriptionHandler, + NewAnnouncementHandler, NewGatewayHandler, NewOpenAIGatewayHandler, + NewTotpHandler, ProvideSettingHandler, // Admin handlers @@ -103,6 +111,7 @@ var ProviderSet = wire.NewSet( admin.NewUserHandler, admin.NewGroupHandler, admin.NewAccountHandler, + admin.NewAnnouncementHandler, admin.NewOAuthHandler, admin.NewOpenAIOAuthHandler, admin.NewGeminiOAuthHandler, diff --git a/backend/internal/middleware/rate_limiter_integration_test.go b/backend/internal/middleware/rate_limiter_integration_test.go index 4759a988..1161364b 100644 --- a/backend/internal/middleware/rate_limiter_integration_test.go +++ 
b/backend/internal/middleware/rate_limiter_integration_test.go @@ -7,6 +7,9 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" + "strconv" "testing" "time" @@ -88,6 +91,7 @@ func performRequest(router *gin.Engine) *httptest.ResponseRecorder { func startRedis(t *testing.T, ctx context.Context) *redis.Client { t.Helper() + ensureDockerAvailable(t) redisContainer, err := tcredis.Run(ctx, redisImageTag) require.NoError(t, err) @@ -112,3 +116,43 @@ func startRedis(t *testing.T, ctx context.Context) *redis.Client { return rdb } + +func ensureDockerAvailable(t *testing.T) { + t.Helper() + if dockerAvailable() { + return + } + t.Skip("Docker 未启用,跳过依赖 testcontainers 的集成测试") +} + +func dockerAvailable() bool { + if os.Getenv("DOCKER_HOST") != "" { + return true + } + + socketCandidates := []string{ + "/var/run/docker.sock", + filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "docker.sock"), + filepath.Join(userHomeDir(), ".docker", "run", "docker.sock"), + filepath.Join(userHomeDir(), ".docker", "desktop", "docker.sock"), + filepath.Join("/run/user", strconv.Itoa(os.Getuid()), "docker.sock"), + } + + for _, socket := range socketCandidates { + if socket == "" { + continue + } + if _, err := os.Stat(socket); err == nil { + return true + } + } + return false +} + +func userHomeDir() string { + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return home +} diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go index 99bedb01..d1712c98 100644 --- a/backend/internal/pkg/antigravity/oauth.go +++ b/backend/internal/pkg/antigravity/oauth.go @@ -33,7 +33,7 @@ const ( "https://www.googleapis.com/auth/experimentsandconfigs" // User-Agent(与 Antigravity-Manager 保持一致) - UserAgent = "antigravity/1.11.9 windows/amd64" + UserAgent = "antigravity/1.15.8 windows/amd64" // Session 过期时间 SessionTTL = 30 * time.Minute diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 720e6f6a..a75bf6b3 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -369,8 +369,10 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu Text: block.Thinking, Thought: true, } - // 保留原有 signature(Claude 模型需要有效的 signature) - if block.Signature != "" { + // signature 处理: + // - Claude 模型(allowDummyThought=false):必须是上游返回的真实 signature(dummy 视为缺失) + // - Gemini 模型(allowDummyThought=true):优先透传真实 signature,缺失时使用 dummy signature + if block.Signature != "" && (allowDummyThought || block.Signature != dummyThoughtSignature) { part.ThoughtSignature = block.Signature } else if !allowDummyThought { // Claude 模型需要有效 signature;在缺失时降级为普通文本,并在上层禁用 thinking mode。 @@ -409,12 +411,12 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu }, } // tool_use 的 signature 处理: - // - Gemini 模型:使用 dummy signature(跳过 thought_signature 校验) - // - Claude 模型:透传上游返回的真实 signature(Vertex/Google 需要完整签名链路) - if allowDummyThought { - part.ThoughtSignature = dummyThoughtSignature - } else if block.Signature != "" && block.Signature != dummyThoughtSignature { + // - Claude 模型(allowDummyThought=false):必须是上游返回的真实 signature(dummy 视为缺失) + // - Gemini 模型(allowDummyThought=true):优先透传真实 signature,缺失时使用 dummy signature + if block.Signature != "" && (allowDummyThought || block.Signature != dummyThoughtSignature) { part.ThoughtSignature = block.Signature + } else if allowDummyThought { + part.ThoughtSignature 
= dummyThoughtSignature } parts = append(parts, part) diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go index 60ee6f63..9d62a4a1 100644 --- a/backend/internal/pkg/antigravity/request_transformer_test.go +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -100,7 +100,7 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { {"type": "tool_use", "id": "t1", "name": "Bash", "input": {"command": "ls"}, "signature": "sig_tool_abc"} ]` - t.Run("Gemini uses dummy tool_use signature", func(t *testing.T) { + t.Run("Gemini preserves provided tool_use signature", func(t *testing.T) { toolIDToName := make(map[string]string) parts, _, err := buildParts(json.RawMessage(content), toolIDToName, true) if err != nil { @@ -109,6 +109,23 @@ func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { if len(parts) != 1 || parts[0].FunctionCall == nil { t.Fatalf("expected 1 functionCall part, got %+v", parts) } + if parts[0].ThoughtSignature != "sig_tool_abc" { + t.Fatalf("expected preserved tool signature %q, got %q", "sig_tool_abc", parts[0].ThoughtSignature) + } + }) + + t.Run("Gemini falls back to dummy tool_use signature when missing", func(t *testing.T) { + contentNoSig := `[ + {"type": "tool_use", "id": "t1", "name": "Bash", "input": {"command": "ls"}} + ]` + toolIDToName := make(map[string]string) + parts, _, err := buildParts(json.RawMessage(contentNoSig), toolIDToName, true) + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + if len(parts) != 1 || parts[0].FunctionCall == nil { + t.Fatalf("expected 1 functionCall part, got %+v", parts) + } if parts[0].ThoughtSignature != dummyThoughtSignature { t.Fatalf("expected dummy tool signature %q, got %q", dummyThoughtSignature, parts[0].ThoughtSignature) } diff --git a/backend/internal/pkg/antigravity/response_transformer.go b/backend/internal/pkg/antigravity/response_transformer.go index a605fee2..eb16f09d 100644 --- a/backend/internal/pkg/antigravity/response_transformer.go +++ b/backend/internal/pkg/antigravity/response_transformer.go @@ -20,6 +20,15 @@ func TransformGeminiToClaude(geminiResp []byte, originalModel string) ([]byte, * v1Resp.Response = directResp v1Resp.ResponseID = directResp.ResponseID v1Resp.ModelVersion = directResp.ModelVersion + } else if len(v1Resp.Response.Candidates) == 0 { + // 第一次解析成功但 candidates 为空,说明是直接的 GeminiResponse 格式 + var directResp GeminiResponse + if err2 := json.Unmarshal(geminiResp, &directResp); err2 != nil { + return nil, nil, fmt.Errorf("parse gemini response as direct: %w", err2) + } + v1Resp.Response = directResp + v1Resp.ResponseID = directResp.ResponseID + v1Resp.ModelVersion = directResp.ModelVersion } // 使用处理器转换 @@ -174,16 +183,20 @@ func (p *NonStreamingProcessor) processPart(part *GeminiPart) { p.trailingSignature = "" } - p.textBuilder += part.Text - - // 非空 text 带签名 - 立即刷新并输出空 thinking 块 + // 非空 text 带签名 - 特殊处理:先输出 text,再输出空 thinking 块 if signature != "" { - p.flushText() + p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{ + Type: "text", + Text: part.Text, + }) p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{ Type: "thinking", Thinking: "", Signature: signature, }) + } else { + // 普通 text (无签名) - 累积到 builder + p.textBuilder += part.Text } } } diff --git a/backend/internal/pkg/gemini/models.go b/backend/internal/pkg/gemini/models.go index e251c8d8..424e8ddb 100644 --- a/backend/internal/pkg/gemini/models.go +++ b/backend/internal/pkg/gemini/models.go @@ 
-16,14 +16,11 @@ type ModelsListResponse struct { func DefaultModels() []Model { methods := []string{"generateContent", "streamGenerateContent"} return []Model{ - {Name: "models/gemini-3-pro-preview", SupportedGenerationMethods: methods}, - {Name: "models/gemini-3-flash-preview", SupportedGenerationMethods: methods}, - {Name: "models/gemini-2.5-pro", SupportedGenerationMethods: methods}, - {Name: "models/gemini-2.5-flash", SupportedGenerationMethods: methods}, {Name: "models/gemini-2.0-flash", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-pro", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-flash", SupportedGenerationMethods: methods}, - {Name: "models/gemini-1.5-flash-8b", SupportedGenerationMethods: methods}, + {Name: "models/gemini-2.5-flash", SupportedGenerationMethods: methods}, + {Name: "models/gemini-2.5-pro", SupportedGenerationMethods: methods}, + {Name: "models/gemini-3-flash-preview", SupportedGenerationMethods: methods}, + {Name: "models/gemini-3-pro-preview", SupportedGenerationMethods: methods}, } } diff --git a/backend/internal/pkg/geminicli/models.go b/backend/internal/pkg/geminicli/models.go index 922988c7..08e69886 100644 --- a/backend/internal/pkg/geminicli/models.go +++ b/backend/internal/pkg/geminicli/models.go @@ -12,10 +12,10 @@ type Model struct { // DefaultModels is the curated Gemini model list used by the admin UI "test account" flow. var DefaultModels = []Model{ {ID: "gemini-2.0-flash", Type: "model", DisplayName: "Gemini 2.0 Flash", CreatedAt: ""}, - {ID: "gemini-2.5-pro", Type: "model", DisplayName: "Gemini 2.5 Pro", CreatedAt: ""}, {ID: "gemini-2.5-flash", Type: "model", DisplayName: "Gemini 2.5 Flash", CreatedAt: ""}, - {ID: "gemini-3-pro-preview", Type: "model", DisplayName: "Gemini 3 Pro Preview", CreatedAt: ""}, + {ID: "gemini-2.5-pro", Type: "model", DisplayName: "Gemini 2.5 Pro", CreatedAt: ""}, {ID: "gemini-3-flash-preview", Type: "model", DisplayName: "Gemini 3 Flash Preview", CreatedAt: ""}, + {ID: "gemini-3-pro-preview", Type: "model", DisplayName: "Gemini 3 Pro Preview", CreatedAt: ""}, } // DefaultTestModel is the default model to preselect in test flows. 
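The tool_use / thinking signature rule that antigravity/request_transformer.go applies above reduces to a single predicate. A minimal sketch for reference only (pickSignature and its parameter names are hypothetical, not code from this patch):

// pickSignature mirrors the signature-selection rule in buildParts:
// Claude models (allowDummyThought=false) require a real upstream signature
// (the dummy value counts as missing); Gemini models (allowDummyThought=true)
// prefer a real signature and fall back to the dummy one.
func pickSignature(sig string, allowDummyThought bool, dummy string) string {
	if sig != "" && (allowDummyThought || sig != dummy) {
		return sig // pass the real upstream signature through
	}
	if allowDummyThought {
		return dummy // Gemini: dummy signature skips thought_signature validation
	}
	return "" // Claude: treat as missing; caller degrades to plain text / disables thinking mode
}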
diff --git a/backend/internal/pkg/oauth/oauth.go b/backend/internal/pkg/oauth/oauth.go index d29c2422..33caffd7 100644 --- a/backend/internal/pkg/oauth/oauth.go +++ b/backend/internal/pkg/oauth/oauth.go @@ -13,20 +13,26 @@ import ( "time" ) -// Claude OAuth Constants (from CRS project) +// Claude OAuth Constants const ( // OAuth Client ID for Claude ClientID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e" // OAuth endpoints AuthorizeURL = "https://claude.ai/oauth/authorize" - TokenURL = "https://console.anthropic.com/v1/oauth/token" - RedirectURI = "https://console.anthropic.com/oauth/code/callback" + TokenURL = "https://platform.claude.com/v1/oauth/token" + RedirectURI = "https://platform.claude.com/oauth/code/callback" - // Scopes - ScopeProfile = "user:profile" + // Scopes - Browser URL (includes org:create_api_key for user authorization) + ScopeOAuth = "org:create_api_key user:profile user:inference user:sessions:claude_code user:mcp_servers" + // Scopes - Internal API call (org:create_api_key not supported in API) + ScopeAPI = "user:profile user:inference user:sessions:claude_code user:mcp_servers" + // Scopes - Setup token (inference only) ScopeInference = "user:inference" + // Code Verifier character set (RFC 7636 compliant) + codeVerifierCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~" + // Session TTL SessionTTL = 30 * time.Minute ) @@ -53,7 +59,6 @@ func NewSessionStore() *SessionStore { sessions: make(map[string]*OAuthSession), stopCh: make(chan struct{}), } - // Start cleanup goroutine go store.cleanup() return store } @@ -78,7 +83,6 @@ func (s *SessionStore) Get(sessionID string) (*OAuthSession, bool) { if !ok { return nil, false } - // Check if expired if time.Since(session.CreatedAt) > SessionTTL { return nil, false } @@ -122,13 +126,13 @@ func GenerateRandomBytes(n int) ([]byte, error) { return b, nil } -// GenerateState generates a random state string for OAuth +// GenerateState generates a random state string for OAuth (base64url encoded) func GenerateState() (string, error) { bytes, err := GenerateRandomBytes(32) if err != nil { return "", err } - return hex.EncodeToString(bytes), nil + return base64URLEncode(bytes), nil } // GenerateSessionID generates a unique session ID @@ -140,13 +144,30 @@ func GenerateSessionID() (string, error) { return hex.EncodeToString(bytes), nil } -// GenerateCodeVerifier generates a PKCE code verifier (32 bytes -> base64url) +// GenerateCodeVerifier generates a PKCE code verifier using character set method func GenerateCodeVerifier() (string, error) { - bytes, err := GenerateRandomBytes(32) - if err != nil { - return "", err + const targetLen = 32 + charsetLen := len(codeVerifierCharset) + limit := 256 - (256 % charsetLen) + + result := make([]byte, 0, targetLen) + randBuf := make([]byte, targetLen*2) + + for len(result) < targetLen { + if _, err := rand.Read(randBuf); err != nil { + return "", err + } + for _, b := range randBuf { + if int(b) < limit { + result = append(result, codeVerifierCharset[int(b)%charsetLen]) + if len(result) >= targetLen { + break + } + } + } } - return base64URLEncode(bytes), nil + + return base64URLEncode(result), nil } // GenerateCodeChallenge generates a PKCE code challenge using S256 method @@ -158,42 +179,31 @@ func GenerateCodeChallenge(verifier string) string { // base64URLEncode encodes bytes to base64url without padding func base64URLEncode(data []byte) string { encoded := base64.URLEncoding.EncodeToString(data) - // Remove padding return strings.TrimRight(encoded, "=") } -// 
BuildAuthorizationURL builds the OAuth authorization URL +// BuildAuthorizationURL builds the OAuth authorization URL with correct parameter order func BuildAuthorizationURL(state, codeChallenge, scope string) string { - params := url.Values{} - params.Set("response_type", "code") - params.Set("client_id", ClientID) - params.Set("redirect_uri", RedirectURI) - params.Set("scope", scope) - params.Set("state", state) - params.Set("code_challenge", codeChallenge) - params.Set("code_challenge_method", "S256") + encodedRedirectURI := url.QueryEscape(RedirectURI) + encodedScope := strings.ReplaceAll(url.QueryEscape(scope), "%20", "+") - return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()) -} - -// TokenRequest represents the token exchange request body -type TokenRequest struct { - GrantType string `json:"grant_type"` - ClientID string `json:"client_id"` - Code string `json:"code"` - RedirectURI string `json:"redirect_uri"` - CodeVerifier string `json:"code_verifier"` - State string `json:"state"` + return fmt.Sprintf("%s?code=true&client_id=%s&response_type=code&redirect_uri=%s&scope=%s&code_challenge=%s&code_challenge_method=S256&state=%s", + AuthorizeURL, + ClientID, + encodedRedirectURI, + encodedScope, + codeChallenge, + state, + ) } // TokenResponse represents the token response from OAuth provider type TokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` - RefreshToken string `json:"refresh_token,omitempty"` - Scope string `json:"scope,omitempty"` - // Organization and Account info from OAuth response + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + RefreshToken string `json:"refresh_token,omitempty"` + Scope string `json:"scope,omitempty"` Organization *OrgInfo `json:"organization,omitempty"` Account *AccountInfo `json:"account,omitempty"` } @@ -205,33 +215,6 @@ type OrgInfo struct { // AccountInfo represents account info from OAuth response type AccountInfo struct { - UUID string `json:"uuid"` -} - -// RefreshTokenRequest represents the refresh token request -type RefreshTokenRequest struct { - GrantType string `json:"grant_type"` - RefreshToken string `json:"refresh_token"` - ClientID string `json:"client_id"` -} - -// BuildTokenRequest creates a token exchange request -func BuildTokenRequest(code, codeVerifier, state string) *TokenRequest { - return &TokenRequest{ - GrantType: "authorization_code", - ClientID: ClientID, - Code: code, - RedirectURI: RedirectURI, - CodeVerifier: codeVerifier, - State: state, - } -} - -// BuildRefreshTokenRequest creates a refresh token request -func BuildRefreshTokenRequest(refreshToken string) *RefreshTokenRequest { - return &RefreshTokenRequest{ - GrantType: "refresh_token", - RefreshToken: refreshToken, - ClientID: ClientID, - } + UUID string `json:"uuid"` + EmailAddress string `json:"email_address"` } diff --git a/backend/internal/pkg/response/response.go b/backend/internal/pkg/response/response.go index a92ff9e8..c5b41d6e 100644 --- a/backend/internal/pkg/response/response.go +++ b/backend/internal/pkg/response/response.go @@ -2,6 +2,7 @@ package response import ( + "log" "math" "net/http" @@ -74,6 +75,12 @@ func ErrorFrom(c *gin.Context, err error) bool { } statusCode, status := infraerrors.ToHTTP(err) + + // Log internal errors with full details for debugging + if statusCode >= 500 && c.Request != nil { + log.Printf("[ERROR] %s %s\n Error: %s", c.Request.Method, c.Request.URL.Path, 
err.Error()) + } + ErrorWithDetails(c, statusCode, status.Message, status.Reason, status.Metadata) return true } @@ -162,11 +169,11 @@ func ParsePagination(c *gin.Context) (page, pageSize int) { // 支持 page_size 和 limit 两种参数名 if ps := c.Query("page_size"); ps != "" { - if val, err := parseInt(ps); err == nil && val > 0 && val <= 100 { + if val, err := parseInt(ps); err == nil && val > 0 && val <= 1000 { pageSize = val } } else if l := c.Query("limit"); l != "" { - if val, err := parseInt(l); err == nil && val > 0 && val <= 100 { + if val, err := parseInt(l); err == nil && val > 0 && val <= 1000 { pageSize = val } } diff --git a/backend/internal/pkg/tlsfingerprint/dialer.go b/backend/internal/pkg/tlsfingerprint/dialer.go new file mode 100644 index 00000000..42510986 --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/dialer.go @@ -0,0 +1,568 @@ +// Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. +// It uses the utls library to create TLS connections that mimic Node.js/Claude Code clients. +package tlsfingerprint + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "log/slog" + "net" + "net/http" + "net/url" + + utls "github.com/refraction-networking/utls" + "golang.org/x/net/proxy" +) + +// Profile contains TLS fingerprint configuration. +type Profile struct { + Name string // Profile name for identification + CipherSuites []uint16 + Curves []uint16 + PointFormats []uint8 + EnableGREASE bool +} + +// Dialer creates TLS connections with custom fingerprints. +type Dialer struct { + profile *Profile + baseDialer func(ctx context.Context, network, addr string) (net.Conn, error) +} + +// HTTPProxyDialer creates TLS connections through HTTP/HTTPS proxies with custom fingerprints. +// It handles the CONNECT tunnel establishment before performing TLS handshake. +type HTTPProxyDialer struct { + profile *Profile + proxyURL *url.URL +} + +// SOCKS5ProxyDialer creates TLS connections through SOCKS5 proxies with custom fingerprints. +// It uses golang.org/x/net/proxy to establish the SOCKS5 tunnel. 
+type SOCKS5ProxyDialer struct { + profile *Profile + proxyURL *url.URL +} + +// Default TLS fingerprint values captured from Claude CLI 2.x (Node.js 20.x + OpenSSL 3.x) +// Captured using: tshark -i lo -f "tcp port 8443" -Y "tls.handshake.type == 1" -V +// JA3 Hash: 1a28e69016765d92e3b381168d68922c +// +// Note: JA3/JA4 may have slight variations due to: +// - Session ticket presence/absence +// - Extension negotiation state +var ( + // defaultCipherSuites contains all 59 cipher suites from Claude CLI + // Order is critical for JA3 fingerprint matching + defaultCipherSuites = []uint16{ + // TLS 1.3 cipher suites (MUST be first) + 0x1302, // TLS_AES_256_GCM_SHA384 + 0x1303, // TLS_CHACHA20_POLY1305_SHA256 + 0x1301, // TLS_AES_128_GCM_SHA256 + + // ECDHE + AES-GCM + 0xc02f, // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + 0xc02b, // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + 0xc030, // TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + 0xc02c, // TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + + // DHE + AES-GCM + 0x009e, // TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 + + // ECDHE/DHE + AES-CBC-SHA256/384 + 0xc027, // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 + 0x0067, // TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 + 0xc028, // TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 + 0x006b, // TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 + + // DHE-DSS/RSA + AES-GCM + 0x00a3, // TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 + 0x009f, // TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 + + // ChaCha20-Poly1305 + 0xcca9, // TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + 0xcca8, // TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + 0xccaa, // TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + + // AES-CCM (256-bit) + 0xc0af, // TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 + 0xc0ad, // TLS_ECDHE_ECDSA_WITH_AES_256_CCM + 0xc0a3, // TLS_DHE_RSA_WITH_AES_256_CCM_8 + 0xc09f, // TLS_DHE_RSA_WITH_AES_256_CCM + + // ARIA (256-bit) + 0xc05d, // TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 + 0xc061, // TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 + 0xc057, // TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 + 0xc053, // TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 + + // DHE-DSS + AES-GCM (128-bit) + 0x00a2, // TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 + + // AES-CCM (128-bit) + 0xc0ae, // TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 + 0xc0ac, // TLS_ECDHE_ECDSA_WITH_AES_128_CCM + 0xc0a2, // TLS_DHE_RSA_WITH_AES_128_CCM_8 + 0xc09e, // TLS_DHE_RSA_WITH_AES_128_CCM + + // ARIA (128-bit) + 0xc05c, // TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 + 0xc060, // TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 + 0xc056, // TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 + 0xc052, // TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 + + // ECDHE/DHE + AES-CBC-SHA384/256 (more) + 0xc024, // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 + 0x006a, // TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 + 0xc023, // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 + 0x0040, // TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 + + // ECDHE/DHE + AES-CBC-SHA (legacy) + 0xc00a, // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + 0xc014, // TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + 0x0039, // TLS_DHE_RSA_WITH_AES_256_CBC_SHA + 0x0038, // TLS_DHE_DSS_WITH_AES_256_CBC_SHA + 0xc009, // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + 0xc013, // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + 0x0033, // TLS_DHE_RSA_WITH_AES_128_CBC_SHA + 0x0032, // TLS_DHE_DSS_WITH_AES_128_CBC_SHA + + // RSA + AES-GCM/CCM/ARIA (non-PFS, 256-bit) + 0x009d, // TLS_RSA_WITH_AES_256_GCM_SHA384 + 0xc0a1, // TLS_RSA_WITH_AES_256_CCM_8 + 0xc09d, // TLS_RSA_WITH_AES_256_CCM + 0xc051, // TLS_RSA_WITH_ARIA_256_GCM_SHA384 + + // RSA + AES-GCM/CCM/ARIA (non-PFS, 128-bit) + 0x009c, // TLS_RSA_WITH_AES_128_GCM_SHA256 + 0xc0a0, 
// TLS_RSA_WITH_AES_128_CCM_8 + 0xc09c, // TLS_RSA_WITH_AES_128_CCM + 0xc050, // TLS_RSA_WITH_ARIA_128_GCM_SHA256 + + // RSA + AES-CBC (non-PFS, legacy) + 0x003d, // TLS_RSA_WITH_AES_256_CBC_SHA256 + 0x003c, // TLS_RSA_WITH_AES_128_CBC_SHA256 + 0x0035, // TLS_RSA_WITH_AES_256_CBC_SHA + 0x002f, // TLS_RSA_WITH_AES_128_CBC_SHA + + // Renegotiation indication + 0x00ff, // TLS_EMPTY_RENEGOTIATION_INFO_SCSV + } + + // defaultCurves contains the 10 supported groups from Claude CLI (including FFDHE) + defaultCurves = []utls.CurveID{ + utls.X25519, // 0x001d + utls.CurveP256, // 0x0017 (secp256r1) + utls.CurveID(0x001e), // x448 + utls.CurveP521, // 0x0019 (secp521r1) + utls.CurveP384, // 0x0018 (secp384r1) + utls.CurveID(0x0100), // ffdhe2048 + utls.CurveID(0x0101), // ffdhe3072 + utls.CurveID(0x0102), // ffdhe4096 + utls.CurveID(0x0103), // ffdhe6144 + utls.CurveID(0x0104), // ffdhe8192 + } + + // defaultPointFormats contains all 3 point formats from Claude CLI + defaultPointFormats = []uint8{ + 0, // uncompressed + 1, // ansiX962_compressed_prime + 2, // ansiX962_compressed_char2 + } + + // defaultSignatureAlgorithms contains the 20 signature algorithms from Claude CLI + defaultSignatureAlgorithms = []utls.SignatureScheme{ + 0x0403, // ecdsa_secp256r1_sha256 + 0x0503, // ecdsa_secp384r1_sha384 + 0x0603, // ecdsa_secp521r1_sha512 + 0x0807, // ed25519 + 0x0808, // ed448 + 0x0809, // rsa_pss_pss_sha256 + 0x080a, // rsa_pss_pss_sha384 + 0x080b, // rsa_pss_pss_sha512 + 0x0804, // rsa_pss_rsae_sha256 + 0x0805, // rsa_pss_rsae_sha384 + 0x0806, // rsa_pss_rsae_sha512 + 0x0401, // rsa_pkcs1_sha256 + 0x0501, // rsa_pkcs1_sha384 + 0x0601, // rsa_pkcs1_sha512 + 0x0303, // ecdsa_sha224 + 0x0301, // rsa_pkcs1_sha224 + 0x0302, // dsa_sha224 + 0x0402, // dsa_sha256 + 0x0502, // dsa_sha384 + 0x0602, // dsa_sha512 + } +) + +// NewDialer creates a new TLS fingerprint dialer. +// baseDialer is used for TCP connection establishment (supports proxy scenarios). +// If baseDialer is nil, direct TCP dial is used. +func NewDialer(profile *Profile, baseDialer func(ctx context.Context, network, addr string) (net.Conn, error)) *Dialer { + if baseDialer == nil { + baseDialer = (&net.Dialer{}).DialContext + } + return &Dialer{profile: profile, baseDialer: baseDialer} +} + +// NewHTTPProxyDialer creates a new TLS fingerprint dialer that works through HTTP/HTTPS proxies. +// It establishes a CONNECT tunnel before performing TLS handshake with custom fingerprint. +func NewHTTPProxyDialer(profile *Profile, proxyURL *url.URL) *HTTPProxyDialer { + return &HTTPProxyDialer{profile: profile, proxyURL: proxyURL} +} + +// NewSOCKS5ProxyDialer creates a new TLS fingerprint dialer that works through SOCKS5 proxies. +// It establishes a SOCKS5 tunnel before performing TLS handshake with custom fingerprint. +func NewSOCKS5ProxyDialer(profile *Profile, proxyURL *url.URL) *SOCKS5ProxyDialer { + return &SOCKS5ProxyDialer{profile: profile, proxyURL: proxyURL} +} + +// DialTLSContext establishes a TLS connection through SOCKS5 proxy with the configured fingerprint. 
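+// The signature matches http.Transport.DialTLSContext, so the method can be
+// assigned to a transport directly (same as Dialer.DialTLSContext below).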
+// Flow: SOCKS5 CONNECT to target -> TLS handshake with utls on the tunnel
+func (d *SOCKS5ProxyDialer) DialTLSContext(ctx context.Context, network, addr string) (net.Conn, error) {
+	slog.Debug("tls_fingerprint_socks5_connecting", "proxy", d.proxyURL.Host, "target", addr)
+
+	// Step 1: Create SOCKS5 dialer
+	var auth *proxy.Auth
+	if d.proxyURL.User != nil {
+		username := d.proxyURL.User.Username()
+		password, _ := d.proxyURL.User.Password()
+		auth = &proxy.Auth{
+			User:     username,
+			Password: password,
+		}
+	}
+
+	// Determine proxy address
+	proxyAddr := d.proxyURL.Host
+	if d.proxyURL.Port() == "" {
+		proxyAddr = net.JoinHostPort(d.proxyURL.Hostname(), "1080") // Default SOCKS5 port
+	}
+
+	socksDialer, err := proxy.SOCKS5("tcp", proxyAddr, auth, proxy.Direct)
+	if err != nil {
+		slog.Debug("tls_fingerprint_socks5_dialer_failed", "error", err)
+		return nil, fmt.Errorf("create SOCKS5 dialer: %w", err)
+	}
+
+	// Step 2: Establish SOCKS5 tunnel to target (honor ctx when the dialer supports it)
+	slog.Debug("tls_fingerprint_socks5_establishing_tunnel", "target", addr)
+	var conn net.Conn
+	if contextDialer, ok := socksDialer.(proxy.ContextDialer); ok {
+		conn, err = contextDialer.DialContext(ctx, "tcp", addr)
+	} else {
+		conn, err = socksDialer.Dial("tcp", addr)
+	}
+	if err != nil {
+		slog.Debug("tls_fingerprint_socks5_connect_failed", "error", err)
+		return nil, fmt.Errorf("SOCKS5 connect: %w", err)
+	}
+	slog.Debug("tls_fingerprint_socks5_tunnel_established")
+
+	// Step 3: Perform TLS handshake on the tunnel with utls fingerprint
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		host = addr
+	}
+	slog.Debug("tls_fingerprint_socks5_starting_handshake", "host", host)
+
+	// Build ClientHello specification from profile (Node.js/Claude CLI fingerprint)
+	spec := buildClientHelloSpecFromProfile(d.profile)
+	slog.Debug("tls_fingerprint_socks5_clienthello_spec",
+		"cipher_suites", len(spec.CipherSuites),
+		"extensions", len(spec.Extensions),
+		"compression_methods", spec.CompressionMethods,
+		"tls_vers_max", fmt.Sprintf("0x%04x", spec.TLSVersMax),
+		"tls_vers_min", fmt.Sprintf("0x%04x", spec.TLSVersMin))
+
+	if d.profile != nil {
+		slog.Debug("tls_fingerprint_socks5_using_profile", "name", d.profile.Name, "grease", d.profile.EnableGREASE)
+	}
+
+	// Create uTLS connection on the tunnel
+	tlsConn := utls.UClient(conn, &utls.Config{
+		ServerName: host,
+	}, utls.HelloCustom)
+
+	if err := tlsConn.ApplyPreset(spec); err != nil {
+		slog.Debug("tls_fingerprint_socks5_apply_preset_failed", "error", err)
+		_ = conn.Close()
+		return nil, fmt.Errorf("apply TLS preset: %w", err)
+	}
+
+	// Use HandshakeContext so cancellation propagates, matching the HTTP proxy and direct dialers.
+	if err := tlsConn.HandshakeContext(ctx); err != nil {
+		slog.Debug("tls_fingerprint_socks5_handshake_failed", "error", err)
+		_ = conn.Close()
+		return nil, fmt.Errorf("TLS handshake failed: %w", err)
+	}
+
+	state := tlsConn.ConnectionState()
+	slog.Debug("tls_fingerprint_socks5_handshake_success",
+		"version", fmt.Sprintf("0x%04x", state.Version),
+		"cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite),
+		"alpn", state.NegotiatedProtocol)
+
+	return tlsConn, nil
+}
+
+// DialTLSContext establishes a TLS connection through HTTP proxy with the configured fingerprint.
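+// The CONNECT exchange on the wire looks roughly like this (illustrative only;
+// hostname and credentials are placeholders):
+//
+//	CONNECT example.com:443 HTTP/1.1
+//	Host: example.com:443
+//	Proxy-Authorization: Basic <base64(user:pass)>
+//
+//	HTTP/1.1 200 Connection Established
+//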
+// Flow: TCP connect to proxy -> CONNECT tunnel -> TLS handshake with utls +func (d *HTTPProxyDialer) DialTLSContext(ctx context.Context, network, addr string) (net.Conn, error) { + slog.Debug("tls_fingerprint_http_proxy_connecting", "proxy", d.proxyURL.Host, "target", addr) + + // Step 1: TCP connect to proxy server + var proxyAddr string + if d.proxyURL.Port() != "" { + proxyAddr = d.proxyURL.Host + } else { + // Default ports + if d.proxyURL.Scheme == "https" { + proxyAddr = net.JoinHostPort(d.proxyURL.Hostname(), "443") + } else { + proxyAddr = net.JoinHostPort(d.proxyURL.Hostname(), "80") + } + } + + dialer := &net.Dialer{} + conn, err := dialer.DialContext(ctx, "tcp", proxyAddr) + if err != nil { + slog.Debug("tls_fingerprint_http_proxy_connect_failed", "error", err) + return nil, fmt.Errorf("connect to proxy: %w", err) + } + slog.Debug("tls_fingerprint_http_proxy_connected", "proxy_addr", proxyAddr) + + // Step 2: Send CONNECT request to establish tunnel + req := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: make(http.Header), + } + + // Add proxy authentication if present + if d.proxyURL.User != nil { + username := d.proxyURL.User.Username() + password, _ := d.proxyURL.User.Password() + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + req.Header.Set("Proxy-Authorization", "Basic "+auth) + } + + slog.Debug("tls_fingerprint_http_proxy_sending_connect", "target", addr) + if err := req.Write(conn); err != nil { + _ = conn.Close() + slog.Debug("tls_fingerprint_http_proxy_write_failed", "error", err) + return nil, fmt.Errorf("write CONNECT request: %w", err) + } + + // Step 3: Read CONNECT response + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, req) + if err != nil { + _ = conn.Close() + slog.Debug("tls_fingerprint_http_proxy_read_response_failed", "error", err) + return nil, fmt.Errorf("read CONNECT response: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + _ = conn.Close() + slog.Debug("tls_fingerprint_http_proxy_connect_failed_status", "status_code", resp.StatusCode, "status", resp.Status) + return nil, fmt.Errorf("proxy CONNECT failed: %s", resp.Status) + } + slog.Debug("tls_fingerprint_http_proxy_tunnel_established") + + // Step 4: Perform TLS handshake on the tunnel with utls fingerprint + host, _, err := net.SplitHostPort(addr) + if err != nil { + host = addr + } + slog.Debug("tls_fingerprint_http_proxy_starting_handshake", "host", host) + + // Build ClientHello specification (reuse the shared method) + spec := buildClientHelloSpecFromProfile(d.profile) + slog.Debug("tls_fingerprint_http_proxy_clienthello_spec", + "cipher_suites", len(spec.CipherSuites), + "extensions", len(spec.Extensions)) + + if d.profile != nil { + slog.Debug("tls_fingerprint_http_proxy_using_profile", "name", d.profile.Name, "grease", d.profile.EnableGREASE) + } + + // Create uTLS connection on the tunnel + // Note: TLS 1.3 cipher suites are handled automatically by utls when TLS 1.3 is in SupportedVersions + tlsConn := utls.UClient(conn, &utls.Config{ + ServerName: host, + }, utls.HelloCustom) + + if err := tlsConn.ApplyPreset(spec); err != nil { + slog.Debug("tls_fingerprint_http_proxy_apply_preset_failed", "error", err) + _ = conn.Close() + return nil, fmt.Errorf("apply TLS preset: %w", err) + } + + if err := tlsConn.HandshakeContext(ctx); err != nil { + slog.Debug("tls_fingerprint_http_proxy_handshake_failed", "error", err) + _ = conn.Close() + return nil, 
fmt.Errorf("TLS handshake failed: %w", err) + } + + state := tlsConn.ConnectionState() + slog.Debug("tls_fingerprint_http_proxy_handshake_success", + "version", fmt.Sprintf("0x%04x", state.Version), + "cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite), + "alpn", state.NegotiatedProtocol) + + return tlsConn, nil +} + +// DialTLSContext establishes a TLS connection with the configured fingerprint. +// This method is designed to be used as http.Transport.DialTLSContext. +func (d *Dialer) DialTLSContext(ctx context.Context, network, addr string) (net.Conn, error) { + // Establish TCP connection using base dialer (supports proxy) + slog.Debug("tls_fingerprint_dialing_tcp", "addr", addr) + conn, err := d.baseDialer(ctx, network, addr) + if err != nil { + slog.Debug("tls_fingerprint_tcp_dial_failed", "error", err) + return nil, err + } + slog.Debug("tls_fingerprint_tcp_connected", "addr", addr) + + // Extract hostname for SNI + host, _, err := net.SplitHostPort(addr) + if err != nil { + host = addr + } + slog.Debug("tls_fingerprint_sni_hostname", "host", host) + + // Build ClientHello specification + spec := d.buildClientHelloSpec() + slog.Debug("tls_fingerprint_clienthello_spec", + "cipher_suites", len(spec.CipherSuites), + "extensions", len(spec.Extensions)) + + // Log profile info + if d.profile != nil { + slog.Debug("tls_fingerprint_using_profile", "name", d.profile.Name, "grease", d.profile.EnableGREASE) + } else { + slog.Debug("tls_fingerprint_using_default_profile") + } + + // Create uTLS connection + // Note: TLS 1.3 cipher suites are handled automatically by utls when TLS 1.3 is in SupportedVersions + tlsConn := utls.UClient(conn, &utls.Config{ + ServerName: host, + }, utls.HelloCustom) + + // Apply fingerprint + if err := tlsConn.ApplyPreset(spec); err != nil { + slog.Debug("tls_fingerprint_apply_preset_failed", "error", err) + _ = conn.Close() + return nil, err + } + slog.Debug("tls_fingerprint_preset_applied") + + // Perform TLS handshake + if err := tlsConn.HandshakeContext(ctx); err != nil { + slog.Debug("tls_fingerprint_handshake_failed", + "error", err, + "local_addr", conn.LocalAddr(), + "remote_addr", conn.RemoteAddr()) + _ = conn.Close() + return nil, fmt.Errorf("TLS handshake failed: %w", err) + } + + // Log successful handshake details + state := tlsConn.ConnectionState() + slog.Debug("tls_fingerprint_handshake_success", + "version", fmt.Sprintf("0x%04x", state.Version), + "cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite), + "alpn", state.NegotiatedProtocol) + + return tlsConn, nil +} + +// buildClientHelloSpec constructs the ClientHello specification based on the profile. +func (d *Dialer) buildClientHelloSpec() *utls.ClientHelloSpec { + return buildClientHelloSpecFromProfile(d.profile) +} + +// toUTLSCurves converts uint16 slice to utls.CurveID slice. +func toUTLSCurves(curves []uint16) []utls.CurveID { + result := make([]utls.CurveID, len(curves)) + for i, c := range curves { + result[i] = utls.CurveID(c) + } + return result +} + +// buildClientHelloSpecFromProfile constructs ClientHelloSpec from a Profile. +// This is a standalone function that can be used by both Dialer and HTTPProxyDialer. 
+func buildClientHelloSpecFromProfile(profile *Profile) *utls.ClientHelloSpec { + // Get cipher suites + var cipherSuites []uint16 + if profile != nil && len(profile.CipherSuites) > 0 { + cipherSuites = profile.CipherSuites + } else { + cipherSuites = defaultCipherSuites + } + + // Get curves + var curves []utls.CurveID + if profile != nil && len(profile.Curves) > 0 { + curves = toUTLSCurves(profile.Curves) + } else { + curves = defaultCurves + } + + // Get point formats + var pointFormats []uint8 + if profile != nil && len(profile.PointFormats) > 0 { + pointFormats = profile.PointFormats + } else { + pointFormats = defaultPointFormats + } + + // Check if GREASE is enabled + enableGREASE := profile != nil && profile.EnableGREASE + + extensions := make([]utls.TLSExtension, 0, 16) + + if enableGREASE { + extensions = append(extensions, &utls.UtlsGREASEExtension{}) + } + + // SNI extension - MUST be explicitly added for HelloCustom mode + // utls will populate the server name from Config.ServerName + extensions = append(extensions, &utls.SNIExtension{}) + + // Claude CLI extension order (captured from tshark): + // server_name(0), ec_point_formats(11), supported_groups(10), session_ticket(35), + // alpn(16), encrypt_then_mac(22), extended_master_secret(23), + // signature_algorithms(13), supported_versions(43), + // psk_key_exchange_modes(45), key_share(51) + extensions = append(extensions, + &utls.SupportedPointsExtension{SupportedPoints: pointFormats}, + &utls.SupportedCurvesExtension{Curves: curves}, + &utls.SessionTicketExtension{}, + &utls.ALPNExtension{AlpnProtocols: []string{"http/1.1"}}, + &utls.GenericExtension{Id: 22}, + &utls.ExtendedMasterSecretExtension{}, + &utls.SignatureAlgorithmsExtension{SupportedSignatureAlgorithms: defaultSignatureAlgorithms}, + &utls.SupportedVersionsExtension{Versions: []uint16{ + utls.VersionTLS13, + utls.VersionTLS12, + }}, + &utls.PSKKeyExchangeModesExtension{Modes: []uint8{utls.PskModeDHE}}, + &utls.KeyShareExtension{KeyShares: []utls.KeyShare{ + {Group: utls.X25519}, + }}, + ) + + if enableGREASE { + extensions = append(extensions, &utls.UtlsGREASEExtension{}) + } + + return &utls.ClientHelloSpec{ + CipherSuites: cipherSuites, + CompressionMethods: []uint8{0}, // null compression only (standard) + Extensions: extensions, + TLSVersMax: utls.VersionTLS13, + TLSVersMin: utls.VersionTLS10, + } +} diff --git a/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go b/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go new file mode 100644 index 00000000..eea74fcc --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/dialer_integration_test.go @@ -0,0 +1,278 @@ +//go:build integration + +// Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. +// +// Integration tests for verifying TLS fingerprint correctness. +// These tests make actual network requests to external services and should be run manually. +// +// Run with: go test -v -tags=integration ./internal/pkg/tlsfingerprint/... +package tlsfingerprint + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "testing" + "time" +) + +// skipIfExternalServiceUnavailable checks if the external service is available. +// If not, it skips the test instead of failing. 
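+//
+// Typical call site, as used by the tests below:
+//
+//	resp, err := client.Do(req)
+//	skipIfExternalServiceUnavailable(t, err)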
+func skipIfExternalServiceUnavailable(t *testing.T, err error) {
+	t.Helper()
+	if err != nil {
+		// Check for common network/TLS errors that indicate external service issues
+		errStr := err.Error()
+		if strings.Contains(errStr, "certificate has expired") ||
+			strings.Contains(errStr, "certificate is not yet valid") ||
+			strings.Contains(errStr, "connection refused") ||
+			strings.Contains(errStr, "no such host") ||
+			strings.Contains(errStr, "network is unreachable") ||
+			strings.Contains(errStr, "timeout") {
+			t.Skipf("skipping test: external service unavailable: %v", err)
+		}
+		t.Fatalf("failed to get fingerprint: %v", err)
+	}
+}
+
+// TestJA3Fingerprint verifies that the JA3/JA4 fingerprint matches the expected values.
+// This test uses tls.peet.ws to verify the fingerprint.
+// Expected JA3 hash: 1a28e69016765d92e3b381168d68922c (Claude CLI / Node.js 20.x)
+// Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 (d=domain) or t13i5911h1_... (i=IP)
+func TestJA3Fingerprint(t *testing.T) {
+	// Skip if network is unavailable or if running in short mode
+	if testing.Short() {
+		t.Skip("skipping integration test in short mode")
+	}
+
+	profile := &Profile{
+		Name:         "Claude CLI Test",
+		EnableGREASE: false,
+	}
+	dialer := NewDialer(profile, nil)
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			DialTLSContext: dialer.DialTLSContext,
+		},
+		Timeout: 30 * time.Second,
+	}
+
+	// Use tls.peet.ws fingerprint detection API
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil)
+	if err != nil {
+		t.Fatalf("failed to create request: %v", err)
+	}
+	req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0")
+
+	resp, err := client.Do(req)
+	skipIfExternalServiceUnavailable(t, err)
+	defer func() { _ = resp.Body.Close() }()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("failed to read response: %v", err)
+	}
+
+	var fpResp FingerprintResponse
+	if err := json.Unmarshal(body, &fpResp); err != nil {
+		t.Logf("Response body: %s", string(body))
+		t.Fatalf("failed to parse fingerprint response: %v", err)
+	}
+
+	// Log all fingerprint information
+	t.Logf("JA3: %s", fpResp.TLS.JA3)
+	t.Logf("JA3 Hash: %s", fpResp.TLS.JA3Hash)
+	t.Logf("JA4: %s", fpResp.TLS.JA4)
+	t.Logf("PeetPrint: %s", fpResp.TLS.PeetPrint)
+	t.Logf("PeetPrint Hash: %s", fpResp.TLS.PeetPrintHash)
+
+	// Verify JA3 hash matches expected value
+	expectedJA3Hash := "1a28e69016765d92e3b381168d68922c"
+	if fpResp.TLS.JA3Hash == expectedJA3Hash {
+		t.Logf("✓ JA3 hash matches expected value: %s", expectedJA3Hash)
+	} else {
+		t.Errorf("✗ JA3 hash mismatch: got %s, expected %s", fpResp.TLS.JA3Hash, expectedJA3Hash)
+	}
+
+	// Verify JA4 fingerprint
+	// JA4 format: t[version][sni][cipher_count][ext_count][alpn]_[cipher_hash]_[ext_hash]
+	// Expected: t13d5911h1 (d=domain) or t13i5911h1 (i=IP)
+	// The suffix _a33745022dd6_1f22a2ca17c4 should match
+	expectedJA4Suffix := "_a33745022dd6_1f22a2ca17c4"
+	if strings.HasSuffix(fpResp.TLS.JA4, expectedJA4Suffix) {
+		t.Logf("✓ JA4 suffix matches expected value: %s", expectedJA4Suffix)
+	} else {
+		t.Errorf("✗ JA4 suffix mismatch: got %s, expected suffix %s", fpResp.TLS.JA4, expectedJA4Suffix)
+	}
+
+	// Verify JA4 prefix (t13d5911h1 or t13i5911h1)
+	// d = domain (SNI present), i = IP (no SNI)
+	// Since we connect to tls.peet.ws (domain), we expect 'd'
+	expectedJA4Prefix := "t13d5911h1"
+	if strings.HasPrefix(fpResp.TLS.JA4, expectedJA4Prefix) {
+ t.Logf("✓ JA4 prefix matches: %s (t13=TLS1.3, d=domain, 59=ciphers, 11=extensions, h1=HTTP/1.1)", expectedJA4Prefix) + } else { + // Also accept 'i' variant for IP connections + altPrefix := "t13i5911h1" + if strings.HasPrefix(fpResp.TLS.JA4, altPrefix) { + t.Logf("✓ JA4 prefix matches (IP variant): %s", altPrefix) + } else { + t.Errorf("✗ JA4 prefix mismatch: got %s, expected %s or %s", fpResp.TLS.JA4, expectedJA4Prefix, altPrefix) + } + } + + // Verify JA3 contains expected cipher suites (TLS 1.3 ciphers at the beginning) + if strings.Contains(fpResp.TLS.JA3, "4866-4867-4865") { + t.Logf("✓ JA3 contains expected TLS 1.3 cipher suites") + } else { + t.Logf("Warning: JA3 does not contain expected TLS 1.3 cipher suites") + } + + // Verify extension list (should be 11 extensions including SNI) + // Expected: 0-11-10-35-16-22-23-13-43-45-51 + expectedExtensions := "0-11-10-35-16-22-23-13-43-45-51" + if strings.Contains(fpResp.TLS.JA3, expectedExtensions) { + t.Logf("✓ JA3 contains expected extension list: %s", expectedExtensions) + } else { + t.Logf("Warning: JA3 extension list may differ") + } +} + +// TestProfileExpectation defines expected fingerprint values for a profile. +type TestProfileExpectation struct { + Profile *Profile + ExpectedJA3 string // Expected JA3 hash (empty = don't check) + ExpectedJA4 string // Expected full JA4 (empty = don't check) + JA4CipherHash string // Expected JA4 cipher hash - the stable middle part (empty = don't check) +} + +// TestAllProfiles tests multiple TLS fingerprint profiles against tls.peet.ws. +// Run with: go test -v -tags=integration -run TestAllProfiles ./internal/pkg/tlsfingerprint/... +func TestAllProfiles(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Define all profiles to test with their expected fingerprints + // These profiles are from config.yaml gateway.tls_fingerprint.profiles + profiles := []TestProfileExpectation{ + { + // Linux x64 Node.js v22.17.1 + // Expected JA3 Hash: 1a28e69016765d92e3b381168d68922c + // Expected JA4: t13d5911h1_a33745022dd6_1f22a2ca17c4 + Profile: &Profile{ + Name: "linux_x64_node_v22171", + EnableGREASE: false, + CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, + Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, + PointFormats: []uint8{0, 1, 2}, + }, + JA4CipherHash: "a33745022dd6", // stable part + }, + { + // MacOS arm64 Node.js v22.18.0 + // Expected JA3 Hash: 70cb5ca646080902703ffda87036a5ea + // Expected JA4: t13d5912h1_a33745022dd6_dbd39dd1d406 + Profile: &Profile{ + Name: "macos_arm64_node_v22180", + EnableGREASE: false, + CipherSuites: []uint16{4866, 4867, 4865, 49199, 49195, 49200, 49196, 158, 49191, 103, 49192, 107, 163, 159, 52393, 52392, 52394, 49327, 49325, 49315, 49311, 49245, 49249, 49239, 49235, 162, 49326, 49324, 49314, 49310, 49244, 49248, 49238, 49234, 49188, 106, 49187, 64, 49162, 49172, 57, 56, 49161, 49171, 51, 50, 157, 49313, 49309, 49233, 156, 49312, 49308, 49232, 61, 60, 53, 47, 255}, + Curves: []uint16{29, 23, 30, 25, 24, 256, 257, 258, 259, 260}, + PointFormats: []uint8{0, 1, 2}, + }, + JA4CipherHash: "a33745022dd6", // stable part (same cipher suites) + }, + } + + for _, tc := 
range profiles { + tc := tc // capture range variable + t.Run(tc.Profile.Name, func(t *testing.T) { + fp := fetchFingerprint(t, tc.Profile) + if fp == nil { + return // fetchFingerprint already called t.Fatal + } + + t.Logf("Profile: %s", tc.Profile.Name) + t.Logf(" JA3: %s", fp.JA3) + t.Logf(" JA3 Hash: %s", fp.JA3Hash) + t.Logf(" JA4: %s", fp.JA4) + t.Logf(" PeetPrint: %s", fp.PeetPrint) + t.Logf(" PeetPrintHash: %s", fp.PeetPrintHash) + + // Verify expectations + if tc.ExpectedJA3 != "" { + if fp.JA3Hash == tc.ExpectedJA3 { + t.Logf(" ✓ JA3 hash matches: %s", tc.ExpectedJA3) + } else { + t.Errorf(" ✗ JA3 hash mismatch: got %s, expected %s", fp.JA3Hash, tc.ExpectedJA3) + } + } + + if tc.ExpectedJA4 != "" { + if fp.JA4 == tc.ExpectedJA4 { + t.Logf(" ✓ JA4 matches: %s", tc.ExpectedJA4) + } else { + t.Errorf(" ✗ JA4 mismatch: got %s, expected %s", fp.JA4, tc.ExpectedJA4) + } + } + + // Check JA4 cipher hash (stable middle part) + // JA4 format: prefix_cipherHash_extHash + if tc.JA4CipherHash != "" { + if strings.Contains(fp.JA4, "_"+tc.JA4CipherHash+"_") { + t.Logf(" ✓ JA4 cipher hash matches: %s", tc.JA4CipherHash) + } else { + t.Errorf(" ✗ JA4 cipher hash mismatch: got %s, expected cipher hash %s", fp.JA4, tc.JA4CipherHash) + } + } + }) + } +} + +// fetchFingerprint makes a request to tls.peet.ws and returns the TLS fingerprint info. +func fetchFingerprint(t *testing.T, profile *Profile) *TLSInfo { + t.Helper() + + dialer := NewDialer(profile, nil) + client := &http.Client{ + Transport: &http.Transport{ + DialTLSContext: dialer.DialTLSContext, + }, + Timeout: 30 * time.Second, + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", "https://tls.peet.ws/api/all", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + return nil + } + req.Header.Set("User-Agent", "Claude Code/2.0.0 Node.js/20.0.0") + + resp, err := client.Do(req) + skipIfExternalServiceUnavailable(t, err) + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response: %v", err) + return nil + } + + var fpResp FingerprintResponse + if err := json.Unmarshal(body, &fpResp); err != nil { + t.Logf("Response body: %s", string(body)) + t.Fatalf("failed to parse fingerprint response: %v", err) + return nil + } + + return &fpResp.TLS +} diff --git a/backend/internal/pkg/tlsfingerprint/dialer_test.go b/backend/internal/pkg/tlsfingerprint/dialer_test.go new file mode 100644 index 00000000..dff7570f --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/dialer_test.go @@ -0,0 +1,160 @@ +// Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. +// +// Unit tests for TLS fingerprint dialer. +// Integration tests that require external network are in dialer_integration_test.go +// and require the 'integration' build tag. +// +// Run unit tests: go test -v ./internal/pkg/tlsfingerprint/... +// Run integration tests: go test -v -tags=integration ./internal/pkg/tlsfingerprint/... +package tlsfingerprint + +import ( + "net/url" + "testing" +) + +// FingerprintResponse represents the response from tls.peet.ws/api/all. +type FingerprintResponse struct { + IP string `json:"ip"` + TLS TLSInfo `json:"tls"` + HTTP2 any `json:"http2"` +} + +// TLSInfo contains TLS fingerprint details. 
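+// Field names follow the tls.peet.ws response schema, for example
+// (values illustrative, taken from the expectations in the integration tests):
+//
+//	{"ja3_hash": "1a28e69016765d92e3b381168d68922c",
+//	 "ja4": "t13d5911h1_a33745022dd6_1f22a2ca17c4"}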
+type TLSInfo struct { + JA3 string `json:"ja3"` + JA3Hash string `json:"ja3_hash"` + JA4 string `json:"ja4"` + PeetPrint string `json:"peetprint"` + PeetPrintHash string `json:"peetprint_hash"` + ClientRandom string `json:"client_random"` + SessionID string `json:"session_id"` +} + +// TestDialerWithProfile tests that different profiles produce different fingerprints. +func TestDialerWithProfile(t *testing.T) { + // Create two dialers with different profiles + profile1 := &Profile{ + Name: "Profile 1 - No GREASE", + EnableGREASE: false, + } + profile2 := &Profile{ + Name: "Profile 2 - With GREASE", + EnableGREASE: true, + } + + dialer1 := NewDialer(profile1, nil) + dialer2 := NewDialer(profile2, nil) + + // Build specs and compare + // Note: We can't directly compare JA3 without making network requests + // but we can verify the specs are different + spec1 := dialer1.buildClientHelloSpec() + spec2 := dialer2.buildClientHelloSpec() + + // Profile with GREASE should have more extensions + if len(spec2.Extensions) <= len(spec1.Extensions) { + t.Error("expected GREASE profile to have more extensions") + } +} + +// TestHTTPProxyDialerBasic tests HTTP proxy dialer creation. +// Note: This is a unit test - actual proxy testing requires a proxy server. +func TestHTTPProxyDialerBasic(t *testing.T) { + profile := &Profile{ + Name: "Test Profile", + EnableGREASE: false, + } + + // Test that dialer is created without panic + proxyURL := mustParseURL("http://proxy.example.com:8080") + dialer := NewHTTPProxyDialer(profile, proxyURL) + + if dialer == nil { + t.Fatal("expected dialer to be created") + } + if dialer.profile != profile { + t.Error("expected profile to be set") + } + if dialer.proxyURL != proxyURL { + t.Error("expected proxyURL to be set") + } +} + +// TestSOCKS5ProxyDialerBasic tests SOCKS5 proxy dialer creation. +// Note: This is a unit test - actual proxy testing requires a proxy server. +func TestSOCKS5ProxyDialerBasic(t *testing.T) { + profile := &Profile{ + Name: "Test Profile", + EnableGREASE: false, + } + + // Test that dialer is created without panic + proxyURL := mustParseURL("socks5://proxy.example.com:1080") + dialer := NewSOCKS5ProxyDialer(profile, proxyURL) + + if dialer == nil { + t.Fatal("expected dialer to be created") + } + if dialer.profile != profile { + t.Error("expected profile to be set") + } + if dialer.proxyURL != proxyURL { + t.Error("expected proxyURL to be set") + } +} + +// TestBuildClientHelloSpec tests ClientHello spec construction. +func TestBuildClientHelloSpec(t *testing.T) { + // Test with nil profile (should use defaults) + spec := buildClientHelloSpecFromProfile(nil) + + if len(spec.CipherSuites) == 0 { + t.Error("expected cipher suites to be set") + } + if len(spec.Extensions) == 0 { + t.Error("expected extensions to be set") + } + + // Verify default cipher suites are used + if len(spec.CipherSuites) != len(defaultCipherSuites) { + t.Errorf("expected %d cipher suites, got %d", len(defaultCipherSuites), len(spec.CipherSuites)) + } + + // Test with custom profile + customProfile := &Profile{ + Name: "Custom", + EnableGREASE: false, + CipherSuites: []uint16{0x1301, 0x1302}, + } + spec = buildClientHelloSpecFromProfile(customProfile) + + if len(spec.CipherSuites) != 2 { + t.Errorf("expected 2 cipher suites, got %d", len(spec.CipherSuites)) + } +} + +// TestToUTLSCurves tests curve ID conversion. 
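+// JA3/JA4 fingerprints are order-sensitive, so the conversion must preserve
+// both the values and the order of the input curve IDs.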
+func TestToUTLSCurves(t *testing.T) { + input := []uint16{0x001d, 0x0017, 0x0018} + result := toUTLSCurves(input) + + if len(result) != len(input) { + t.Errorf("expected %d curves, got %d", len(input), len(result)) + } + + for i, curve := range result { + if uint16(curve) != input[i] { + t.Errorf("curve %d: expected 0x%04x, got 0x%04x", i, input[i], uint16(curve)) + } + } +} + +// Helper function to parse URL without error handling. +func mustParseURL(rawURL string) *url.URL { + u, err := url.Parse(rawURL) + if err != nil { + panic(err) + } + return u +} diff --git a/backend/internal/pkg/tlsfingerprint/registry.go b/backend/internal/pkg/tlsfingerprint/registry.go new file mode 100644 index 00000000..6e9dc539 --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/registry.go @@ -0,0 +1,171 @@ +// Package tlsfingerprint provides TLS fingerprint simulation for HTTP clients. +package tlsfingerprint + +import ( + "log/slog" + "sort" + "sync" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +// DefaultProfileName is the name of the built-in Claude CLI profile. +const DefaultProfileName = "claude_cli_v2" + +// Registry manages TLS fingerprint profiles. +// It holds a collection of profiles that can be used for TLS fingerprint simulation. +// Profiles are selected based on account ID using modulo operation. +type Registry struct { + mu sync.RWMutex + profiles map[string]*Profile + profileNames []string // Sorted list of profile names for deterministic selection +} + +// NewRegistry creates a new TLS fingerprint profile registry. +// It initializes with the built-in default profile. +func NewRegistry() *Registry { + r := &Registry{ + profiles: make(map[string]*Profile), + profileNames: make([]string, 0), + } + + // Register the built-in default profile + r.registerBuiltinProfile() + + return r +} + +// NewRegistryFromConfig creates a new registry and loads profiles from config. +// If the config has custom profiles defined, they will be merged with the built-in default. +func NewRegistryFromConfig(cfg *config.TLSFingerprintConfig) *Registry { + r := NewRegistry() + + if cfg == nil || !cfg.Enabled { + slog.Debug("tls_registry_disabled", "reason", "disabled or no config") + return r + } + + // Load custom profiles from config + for name, profileCfg := range cfg.Profiles { + profile := &Profile{ + Name: profileCfg.Name, + EnableGREASE: profileCfg.EnableGREASE, + CipherSuites: profileCfg.CipherSuites, + Curves: profileCfg.Curves, + PointFormats: profileCfg.PointFormats, + } + + // If the profile has empty values, they will use defaults in dialer + r.RegisterProfile(name, profile) + slog.Debug("tls_registry_loaded_profile", "key", name, "name", profileCfg.Name) + } + + slog.Debug("tls_registry_initialized", "profile_count", len(r.profileNames), "profiles", r.profileNames) + return r +} + +// registerBuiltinProfile adds the default Claude CLI profile to the registry. +func (r *Registry) registerBuiltinProfile() { + defaultProfile := &Profile{ + Name: "Claude CLI 2.x (Node.js 20.x + OpenSSL 3.x)", + EnableGREASE: false, // Node.js does not use GREASE + // Empty slices will cause dialer to use built-in defaults + CipherSuites: nil, + Curves: nil, + PointFormats: nil, + } + r.RegisterProfile(DefaultProfileName, defaultProfile) +} + +// RegisterProfile adds or updates a profile in the registry. 
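+// It is safe for concurrent use. Illustrative usage:
+//
+//	r := NewRegistry()
+//	r.RegisterProfile("custom", &Profile{Name: "Custom", EnableGREASE: true})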
+func (r *Registry) RegisterProfile(name string, profile *Profile) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	// Check if this is a new profile
+	_, exists := r.profiles[name]
+	r.profiles[name] = profile
+
+	if !exists {
+		r.profileNames = append(r.profileNames, name)
+		// Keep names sorted for deterministic selection
+		sort.Strings(r.profileNames)
+	}
+}
+
+// GetProfile returns a profile by name.
+// Returns nil if the profile does not exist.
+func (r *Registry) GetProfile(name string) *Profile {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.profiles[name]
+}
+
+// GetDefaultProfile returns the built-in default profile.
+func (r *Registry) GetDefaultProfile() *Profile {
+	return r.GetProfile(DefaultProfileName)
+}
+
+// GetProfileByAccountID returns a profile for the given account ID.
+// The profile is selected using: profileNames[accountID % len(profiles)]
+// This ensures deterministic profile assignment for each account.
+func (r *Registry) GetProfileByAccountID(accountID int64) *Profile {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if len(r.profileNames) == 0 {
+		return nil
+	}
+
+	// Use modulo to select profile index
+	// Use absolute value to handle negative IDs (though unlikely)
+	idx := accountID
+	if idx < 0 {
+		idx = -idx
+	}
+	selectedIndex := int(idx % int64(len(r.profileNames)))
+	selectedName := r.profileNames[selectedIndex]
+
+	return r.profiles[selectedName]
+}
+
+// ProfileCount returns the number of registered profiles.
+func (r *Registry) ProfileCount() int {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return len(r.profiles)
+}
+
+// ProfileNames returns a sorted list of all registered profile names.
+func (r *Registry) ProfileNames() []string {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	// Return a copy to prevent modification
+	names := make([]string, len(r.profileNames))
+	copy(names, r.profileNames)
+	return names
+}
+
+// Global registry instance for convenience
+var globalRegistry *Registry
+var globalRegistryOnce sync.Once
+
+// GlobalRegistry returns the global TLS fingerprint registry.
+// The registry is lazily initialized with the default profile.
+func GlobalRegistry() *Registry {
+	globalRegistryOnce.Do(func() {
+		globalRegistry = NewRegistry()
+	})
+	return globalRegistry
+}
+
+// InitGlobalRegistry initializes the global registry with configuration.
+// This should be called during application startup, before GlobalRegistry().
+// Because initialization goes through sync.Once, only the first call takes
+// effect; subsequent calls return the already-initialized registry unchanged.
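+// Illustrative startup wiring (the exact config field path is an assumption):
+//
+//	registry := tlsfingerprint.InitGlobalRegistry(&cfg.Gateway.TLSFingerprint)
+//	profile := registry.GetProfileByAccountID(accountID)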
+func InitGlobalRegistry(cfg *config.TLSFingerprintConfig) *Registry { + globalRegistryOnce.Do(func() { + globalRegistry = NewRegistryFromConfig(cfg) + }) + return globalRegistry +} diff --git a/backend/internal/pkg/tlsfingerprint/registry_test.go b/backend/internal/pkg/tlsfingerprint/registry_test.go new file mode 100644 index 00000000..752ba0cc --- /dev/null +++ b/backend/internal/pkg/tlsfingerprint/registry_test.go @@ -0,0 +1,243 @@ +package tlsfingerprint + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +func TestNewRegistry(t *testing.T) { + r := NewRegistry() + + // Should have exactly one profile (the default) + if r.ProfileCount() != 1 { + t.Errorf("expected 1 profile, got %d", r.ProfileCount()) + } + + // Should have the default profile + profile := r.GetDefaultProfile() + if profile == nil { + t.Error("expected default profile to exist") + } + + // Default profile name should be in the list + names := r.ProfileNames() + if len(names) != 1 || names[0] != DefaultProfileName { + t.Errorf("expected profile names to be [%s], got %v", DefaultProfileName, names) + } +} + +func TestRegisterProfile(t *testing.T) { + r := NewRegistry() + + // Register a new profile + customProfile := &Profile{ + Name: "Custom Profile", + EnableGREASE: true, + } + r.RegisterProfile("custom", customProfile) + + // Should now have 2 profiles + if r.ProfileCount() != 2 { + t.Errorf("expected 2 profiles, got %d", r.ProfileCount()) + } + + // Should be able to retrieve the custom profile + retrieved := r.GetProfile("custom") + if retrieved == nil { + t.Fatal("expected custom profile to exist") + } + if retrieved.Name != "Custom Profile" { + t.Errorf("expected profile name 'Custom Profile', got '%s'", retrieved.Name) + } + if !retrieved.EnableGREASE { + t.Error("expected EnableGREASE to be true") + } +} + +func TestGetProfile(t *testing.T) { + r := NewRegistry() + + // Get existing profile + profile := r.GetProfile(DefaultProfileName) + if profile == nil { + t.Error("expected default profile to exist") + } + + // Get non-existing profile + nonExistent := r.GetProfile("nonexistent") + if nonExistent != nil { + t.Error("expected nil for non-existent profile") + } +} + +func TestGetProfileByAccountID(t *testing.T) { + r := NewRegistry() + + // With only default profile, all account IDs should return the same profile + for i := int64(0); i < 10; i++ { + profile := r.GetProfileByAccountID(i) + if profile == nil { + t.Errorf("expected profile for account %d, got nil", i) + } + } + + // Add more profiles + r.RegisterProfile("profile_a", &Profile{Name: "Profile A"}) + r.RegisterProfile("profile_b", &Profile{Name: "Profile B"}) + + // Now we have 3 profiles: claude_cli_v2, profile_a, profile_b + // Names are sorted, so order is: claude_cli_v2, profile_a, profile_b + expectedOrder := []string{DefaultProfileName, "profile_a", "profile_b"} + names := r.ProfileNames() + for i, name := range expectedOrder { + if names[i] != name { + t.Errorf("expected name at index %d to be %s, got %s", i, name, names[i]) + } + } + + // Test modulo selection + // Account ID 0 % 3 = 0 -> claude_cli_v2 + // Account ID 1 % 3 = 1 -> profile_a + // Account ID 2 % 3 = 2 -> profile_b + // Account ID 3 % 3 = 0 -> claude_cli_v2 + testCases := []struct { + accountID int64 + expectedName string + }{ + {0, "Claude CLI 2.x (Node.js 20.x + OpenSSL 3.x)"}, + {1, "Profile A"}, + {2, "Profile B"}, + {3, "Claude CLI 2.x (Node.js 20.x + OpenSSL 3.x)"}, + {4, "Profile A"}, + {5, "Profile B"}, + {100, "Profile A"}, // 100 % 3 = 1 + 
{-1, "Profile A"}, // |-1| % 3 = 1 + {-3, "Claude CLI 2.x (Node.js 20.x + OpenSSL 3.x)"}, // |-3| % 3 = 0 + } + + for _, tc := range testCases { + profile := r.GetProfileByAccountID(tc.accountID) + if profile == nil { + t.Errorf("expected profile for account %d, got nil", tc.accountID) + continue + } + if profile.Name != tc.expectedName { + t.Errorf("account %d: expected profile name '%s', got '%s'", tc.accountID, tc.expectedName, profile.Name) + } + } +} + +func TestNewRegistryFromConfig(t *testing.T) { + // Test with nil config + r := NewRegistryFromConfig(nil) + if r.ProfileCount() != 1 { + t.Errorf("expected 1 profile with nil config, got %d", r.ProfileCount()) + } + + // Test with disabled config + disabledCfg := &config.TLSFingerprintConfig{ + Enabled: false, + } + r = NewRegistryFromConfig(disabledCfg) + if r.ProfileCount() != 1 { + t.Errorf("expected 1 profile with disabled config, got %d", r.ProfileCount()) + } + + // Test with enabled config and custom profiles + enabledCfg := &config.TLSFingerprintConfig{ + Enabled: true, + Profiles: map[string]config.TLSProfileConfig{ + "custom1": { + Name: "Custom Profile 1", + EnableGREASE: true, + }, + "custom2": { + Name: "Custom Profile 2", + EnableGREASE: false, + }, + }, + } + r = NewRegistryFromConfig(enabledCfg) + + // Should have 3 profiles: default + 2 custom + if r.ProfileCount() != 3 { + t.Errorf("expected 3 profiles, got %d", r.ProfileCount()) + } + + // Check custom profiles exist + custom1 := r.GetProfile("custom1") + if custom1 == nil || custom1.Name != "Custom Profile 1" { + t.Error("expected custom1 profile to exist with correct name") + } + custom2 := r.GetProfile("custom2") + if custom2 == nil || custom2.Name != "Custom Profile 2" { + t.Error("expected custom2 profile to exist with correct name") + } +} + +func TestProfileNames(t *testing.T) { + r := NewRegistry() + + // Add profiles in non-alphabetical order + r.RegisterProfile("zebra", &Profile{Name: "Zebra"}) + r.RegisterProfile("alpha", &Profile{Name: "Alpha"}) + r.RegisterProfile("beta", &Profile{Name: "Beta"}) + + names := r.ProfileNames() + + // Should be sorted alphabetically + expected := []string{"alpha", "beta", DefaultProfileName, "zebra"} + if len(names) != len(expected) { + t.Errorf("expected %d names, got %d", len(expected), len(names)) + } + for i, name := range expected { + if names[i] != name { + t.Errorf("expected name at index %d to be %s, got %s", i, name, names[i]) + } + } + + // Test that returned slice is a copy (modifying it shouldn't affect registry) + names[0] = "modified" + originalNames := r.ProfileNames() + if originalNames[0] == "modified" { + t.Error("modifying returned slice should not affect registry") + } +} + +func TestConcurrentAccess(t *testing.T) { + r := NewRegistry() + + // Run concurrent reads and writes + done := make(chan bool) + + // Writers + for i := 0; i < 10; i++ { + go func(id int) { + for j := 0; j < 100; j++ { + r.RegisterProfile("concurrent"+string(rune('0'+id)), &Profile{Name: "Concurrent"}) + } + done <- true + }(i) + } + + // Readers + for i := 0; i < 10; i++ { + go func(id int) { + for j := 0; j < 100; j++ { + _ = r.ProfileCount() + _ = r.ProfileNames() + _ = r.GetProfileByAccountID(int64(id * j)) + _ = r.GetProfile(DefaultProfileName) + } + done <- true + }(i) + } + + // Wait for all goroutines + for i := 0; i < 20; i++ { + <-done + } + + // Test should pass without data races (run with -race flag) +} diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 
73f1cd97..96671ca3 100644
--- a/backend/internal/repository/account_repo.go
+++ b/backend/internal/repository/account_repo.go
@@ -39,9 +39,15 @@ import (
 // 设计说明:
 // - client: Ent 客户端,用于类型安全的 ORM 操作
 // - sql: 原生 SQL 执行器,用于复杂查询和批量操作
+// - schedulerCache: scheduler cache, used to sync snapshots when account status changes
 type accountRepository struct {
 	client *dbent.Client // Ent ORM 客户端
 	sql    sqlExecutor   // 原生 SQL 执行接口
+	// schedulerCache is used to proactively sync account snapshots to the cache
+	// when account status changes, ensuring sticky sessions can promptly detect
+	// unavailable accounts.
+	schedulerCache service.SchedulerCache
 }
 
 type tempUnschedSnapshot struct {
@@ -51,14 +57,14 @@ type tempUnschedSnapshot struct {
 
 // NewAccountRepository 创建账户仓储实例。
 // 这是对外暴露的构造函数,返回接口类型以便于依赖注入。
-func NewAccountRepository(client *dbent.Client, sqlDB *sql.DB) service.AccountRepository {
-	return newAccountRepositoryWithSQL(client, sqlDB)
+func NewAccountRepository(client *dbent.Client, sqlDB *sql.DB, schedulerCache service.SchedulerCache) service.AccountRepository {
+	return newAccountRepositoryWithSQL(client, sqlDB, schedulerCache)
 }
 
 // newAccountRepositoryWithSQL 是内部构造函数,支持依赖注入 SQL 执行器。
 // 这种设计便于单元测试时注入 mock 对象。
-func newAccountRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *accountRepository {
-	return &accountRepository{client: client, sql: sqlq}
+func newAccountRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor, schedulerCache service.SchedulerCache) *accountRepository {
+	return &accountRepository{client: client, sql: sqlq, schedulerCache: schedulerCache}
 }
 
 func (r *accountRepository) Create(ctx context.Context, account *service.Account) error {
@@ -356,6 +362,9 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account
 	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil {
 		log.Printf("[SchedulerOutbox] enqueue account update failed: account=%d err=%v", account.ID, err)
 	}
+	if account.Status == service.StatusError || account.Status == service.StatusDisabled || !account.Schedulable {
+		r.syncSchedulerAccountSnapshot(ctx, account.ID)
+	}
 	return nil
 }
 
@@ -473,37 +482,6 @@ func (r *accountRepository) ListByPlatform(ctx context.Context, platform string)
 	return r.accountsToService(ctx, accounts)
 }
 
-func (r *accountRepository) ListByPlatformAndCredentialEmails(
-	ctx context.Context,
-	platform string,
-	emails []string,
-) ([]service.Account, error) {
-	if len(emails) == 0 {
-		return []service.Account{}, nil
-	}
-	args := make([]any, 0, len(emails))
-	for _, email := range emails {
-		if email == "" {
-			continue
-		}
-		args = append(args, email)
-	}
-	if len(args) == 0 {
-		return []service.Account{}, nil
-	}
-
-	accounts, err := r.client.Account.Query().
-		Where(dbaccount.PlatformEQ(platform)).
-		Where(func(s *entsql.Selector) {
-			s.Where(sqljson.ValueIn(dbaccount.FieldCredentials, args, sqljson.Path("email")))
-		}).
-		All(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return r.accountsToService(ctx, accounts)
-}
-
 func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error {
 	now := time.Now()
 	_, err := r.client.Account.Update().
@@ -571,9 +549,32 @@ func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg str
 	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
 		log.Printf("[SchedulerOutbox] enqueue set error failed: account=%d err=%v", id, err)
 	}
+	r.syncSchedulerAccountSnapshot(ctx, id)
 	return nil
 }
 
+// syncSchedulerAccountSnapshot proactively syncs the account snapshot to the
+// scheduler cache when account status changes. Called when an account is set to
+// error, disabled, unschedulable, or temporarily unschedulable, ensuring the
+// scheduler and sticky-session logic can promptly detect the latest account
+// state and avoid continuing to use unavailable accounts.
+func (r *accountRepository) syncSchedulerAccountSnapshot(ctx context.Context, accountID int64) {
+	if r == nil || r.schedulerCache == nil || accountID <= 0 {
+		return
+	}
+	account, err := r.GetByID(ctx, accountID)
+	if err != nil {
+		log.Printf("[Scheduler] sync account snapshot read failed: id=%d err=%v", accountID, err)
+		return
+	}
+	if err := r.schedulerCache.SetAccount(ctx, account); err != nil {
+		log.Printf("[Scheduler] sync account snapshot write failed: id=%d err=%v", accountID, err)
+	}
+}
+
 func (r *accountRepository) ClearError(ctx context.Context, id int64) error {
 	_, err := r.client.Account.Update().
 		Where(dbaccount.IDEQ(id)).
@@ -787,7 +788,6 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA
 		Where(dbaccount.IDEQ(id)).
 		SetRateLimitedAt(now).
 		SetRateLimitResetAt(resetAt).
-		SetLastUsedAt(now).
 		Save(ctx)
 	if err != nil {
 		return err
@@ -922,6 +922,7 @@ func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64,
 	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
 		log.Printf("[SchedulerOutbox] enqueue temp unschedulable failed: account=%d err=%v", id, err)
 	}
+	r.syncSchedulerAccountSnapshot(ctx, id)
 	return nil
 }
 
@@ -1018,7 +1019,16 @@ func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, s
 		builder.SetSessionWindowEnd(*end)
 	}
 	_, err := builder.Save(ctx)
-	return err
+	if err != nil {
+		return err
+	}
+	// Trigger a scheduler cache update (only when the window times changed)
+	if start != nil || end != nil {
+		if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+			log.Printf("[SchedulerOutbox] enqueue session window update failed: account=%d err=%v", id, err)
+		}
+	}
+	return nil
 }
 
 func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedulable bool) error {
@@ -1032,6 +1042,9 @@ func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedu
 	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
 		log.Printf("[SchedulerOutbox] enqueue schedulable change failed: account=%d err=%v", id, err)
 	}
+	if !schedulable {
+		r.syncSchedulerAccountSnapshot(ctx, id)
+	}
 	return nil
 }
 
@@ -1186,6 +1199,18 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates
 	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountBulkChanged, nil, nil, payload); err != nil {
 		log.Printf("[SchedulerOutbox] enqueue bulk update failed: err=%v", err)
 	}
+	shouldSync := false
+	if updates.Status != nil && (*updates.Status == service.StatusError || *updates.Status ==
service.StatusDisabled) { + shouldSync = true + } + if updates.Schedulable != nil && !*updates.Schedulable { + shouldSync = true + } + if shouldSync { + for _, id := range ids { + r.syncSchedulerAccountSnapshot(ctx, id) + } + } } return rows, nil } diff --git a/backend/internal/repository/account_repo_integration_test.go b/backend/internal/repository/account_repo_integration_test.go index 250b141d..a054b6d6 100644 --- a/backend/internal/repository/account_repo_integration_test.go +++ b/backend/internal/repository/account_repo_integration_test.go @@ -21,11 +21,56 @@ type AccountRepoSuite struct { repo *accountRepository } +type schedulerCacheRecorder struct { + setAccounts []*service.Account +} + +func (s *schedulerCacheRecorder) GetSnapshot(ctx context.Context, bucket service.SchedulerBucket) ([]*service.Account, bool, error) { + return nil, false, nil +} + +func (s *schedulerCacheRecorder) SetSnapshot(ctx context.Context, bucket service.SchedulerBucket, accounts []service.Account) error { + return nil +} + +func (s *schedulerCacheRecorder) GetAccount(ctx context.Context, accountID int64) (*service.Account, error) { + return nil, nil +} + +func (s *schedulerCacheRecorder) SetAccount(ctx context.Context, account *service.Account) error { + s.setAccounts = append(s.setAccounts, account) + return nil +} + +func (s *schedulerCacheRecorder) DeleteAccount(ctx context.Context, accountID int64) error { + return nil +} + +func (s *schedulerCacheRecorder) UpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { + return nil +} + +func (s *schedulerCacheRecorder) TryLockBucket(ctx context.Context, bucket service.SchedulerBucket, ttl time.Duration) (bool, error) { + return true, nil +} + +func (s *schedulerCacheRecorder) ListBuckets(ctx context.Context) ([]service.SchedulerBucket, error) { + return nil, nil +} + +func (s *schedulerCacheRecorder) GetOutboxWatermark(ctx context.Context) (int64, error) { + return 0, nil +} + +func (s *schedulerCacheRecorder) SetOutboxWatermark(ctx context.Context, id int64) error { + return nil +} + func (s *AccountRepoSuite) SetupTest() { s.ctx = context.Background() tx := testEntTx(s.T()) s.client = tx.Client() - s.repo = newAccountRepositoryWithSQL(s.client, tx) + s.repo = newAccountRepositoryWithSQL(s.client, tx, nil) } func TestAccountRepoSuite(t *testing.T) { @@ -73,6 +118,20 @@ func (s *AccountRepoSuite) TestUpdate() { s.Require().Equal("updated", got.Name) } +func (s *AccountRepoSuite) TestUpdate_SyncSchedulerSnapshotOnDisabled() { + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "sync-update", Status: service.StatusActive, Schedulable: true}) + cacheRecorder := &schedulerCacheRecorder{} + s.repo.schedulerCache = cacheRecorder + + account.Status = service.StatusDisabled + err := s.repo.Update(s.ctx, account) + s.Require().NoError(err, "Update") + + s.Require().Len(cacheRecorder.setAccounts, 1) + s.Require().Equal(account.ID, cacheRecorder.setAccounts[0].ID) + s.Require().Equal(service.StatusDisabled, cacheRecorder.setAccounts[0].Status) +} + func (s *AccountRepoSuite) TestDelete() { account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "to-delete"}) @@ -174,7 +233,7 @@ func (s *AccountRepoSuite) TestListWithFilters() { // 每个 case 重新获取隔离资源 tx := testEntTx(s.T()) client := tx.Client() - repo := newAccountRepositoryWithSQL(client, tx) + repo := newAccountRepositoryWithSQL(client, tx, nil) ctx := context.Background() tt.setup(client) @@ -365,12 +424,38 @@ func (s *AccountRepoSuite) 
TestListSchedulableByGroupIDAndPlatform() { func (s *AccountRepoSuite) TestSetSchedulable() { account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-sched", Schedulable: true}) + cacheRecorder := &schedulerCacheRecorder{} + s.repo.schedulerCache = cacheRecorder s.Require().NoError(s.repo.SetSchedulable(s.ctx, account.ID, false)) got, err := s.repo.GetByID(s.ctx, account.ID) s.Require().NoError(err) s.Require().False(got.Schedulable) + s.Require().Len(cacheRecorder.setAccounts, 1) + s.Require().Equal(account.ID, cacheRecorder.setAccounts[0].ID) +} + +func (s *AccountRepoSuite) TestBulkUpdate_SyncSchedulerSnapshotOnDisabled() { + account1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk-1", Status: service.StatusActive, Schedulable: true}) + account2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk-2", Status: service.StatusActive, Schedulable: true}) + cacheRecorder := &schedulerCacheRecorder{} + s.repo.schedulerCache = cacheRecorder + + disabled := service.StatusDisabled + rows, err := s.repo.BulkUpdate(s.ctx, []int64{account1.ID, account2.ID}, service.AccountBulkUpdate{ + Status: &disabled, + }) + s.Require().NoError(err) + s.Require().Equal(int64(2), rows) + + s.Require().Len(cacheRecorder.setAccounts, 2) + ids := map[int64]struct{}{} + for _, acc := range cacheRecorder.setAccounts { + ids[acc.ID] = struct{}{} + } + s.Require().Contains(ids, account1.ID) + s.Require().Contains(ids, account2.ID) } // --- SetOverloaded / SetRateLimited / ClearRateLimit --- diff --git a/backend/internal/repository/aes_encryptor.go b/backend/internal/repository/aes_encryptor.go new file mode 100644 index 00000000..924e3698 --- /dev/null +++ b/backend/internal/repository/aes_encryptor.go @@ -0,0 +1,95 @@ +package repository + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// AESEncryptor implements SecretEncryptor using AES-256-GCM +type AESEncryptor struct { + key []byte +} + +// NewAESEncryptor creates a new AES encryptor +func NewAESEncryptor(cfg *config.Config) (service.SecretEncryptor, error) { + key, err := hex.DecodeString(cfg.Totp.EncryptionKey) + if err != nil { + return nil, fmt.Errorf("invalid totp encryption key: %w", err) + } + + if len(key) != 32 { + return nil, fmt.Errorf("totp encryption key must be 32 bytes (64 hex chars), got %d bytes", len(key)) + } + + return &AESEncryptor{key: key}, nil +} + +// Encrypt encrypts plaintext using AES-256-GCM +// Output format: base64(nonce + ciphertext + tag) +func (e *AESEncryptor) Encrypt(plaintext string) (string, error) { + block, err := aes.NewCipher(e.key) + if err != nil { + return "", fmt.Errorf("create cipher: %w", err) + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("create gcm: %w", err) + } + + // Generate a random nonce + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", fmt.Errorf("generate nonce: %w", err) + } + + // Encrypt the plaintext + // Seal appends the ciphertext and tag to the nonce + ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil) + + // Encode as base64 + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +// Decrypt decrypts ciphertext using AES-256-GCM +func (e *AESEncryptor) Decrypt(ciphertext string) (string, error) { + // Decode from base64 + data, err := 
base64.StdEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("decode base64: %w", err) + } + + block, err := aes.NewCipher(e.key) + if err != nil { + return "", fmt.Errorf("create cipher: %w", err) + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("create gcm: %w", err) + } + + nonceSize := gcm.NonceSize() + if len(data) < nonceSize { + return "", fmt.Errorf("ciphertext too short") + } + + // Extract nonce and ciphertext + nonce, ciphertextData := data[:nonceSize], data[nonceSize:] + + // Decrypt + plaintext, err := gcm.Open(nil, nonce, ciphertextData, nil) + if err != nil { + return "", fmt.Errorf("decrypt: %w", err) + } + + return string(plaintext), nil +} diff --git a/backend/internal/repository/announcement_read_repo.go b/backend/internal/repository/announcement_read_repo.go new file mode 100644 index 00000000..2dc346b1 --- /dev/null +++ b/backend/internal/repository/announcement_read_repo.go @@ -0,0 +1,83 @@ +package repository + +import ( + "context" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/announcementread" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type announcementReadRepository struct { + client *dbent.Client +} + +func NewAnnouncementReadRepository(client *dbent.Client) service.AnnouncementReadRepository { + return &announcementReadRepository{client: client} +} + +func (r *announcementReadRepository) MarkRead(ctx context.Context, announcementID, userID int64, readAt time.Time) error { + client := clientFromContext(ctx, r.client) + return client.AnnouncementRead.Create(). + SetAnnouncementID(announcementID). + SetUserID(userID). + SetReadAt(readAt). + OnConflictColumns(announcementread.FieldAnnouncementID, announcementread.FieldUserID). + DoNothing(). + Exec(ctx) +} + +func (r *announcementReadRepository) GetReadMapByUser(ctx context.Context, userID int64, announcementIDs []int64) (map[int64]time.Time, error) { + if len(announcementIDs) == 0 { + return map[int64]time.Time{}, nil + } + + rows, err := r.client.AnnouncementRead.Query(). + Where( + announcementread.UserIDEQ(userID), + announcementread.AnnouncementIDIn(announcementIDs...), + ). + All(ctx) + if err != nil { + return nil, err + } + + out := make(map[int64]time.Time, len(rows)) + for i := range rows { + out[rows[i].AnnouncementID] = rows[i].ReadAt + } + return out, nil +} + +func (r *announcementReadRepository) GetReadMapByUsers(ctx context.Context, announcementID int64, userIDs []int64) (map[int64]time.Time, error) { + if len(userIDs) == 0 { + return map[int64]time.Time{}, nil + } + + rows, err := r.client.AnnouncementRead.Query(). + Where( + announcementread.AnnouncementIDEQ(announcementID), + announcementread.UserIDIn(userIDs...), + ). + All(ctx) + if err != nil { + return nil, err + } + + out := make(map[int64]time.Time, len(rows)) + for i := range rows { + out[rows[i].UserID] = rows[i].ReadAt + } + return out, nil +} + +func (r *announcementReadRepository) CountByAnnouncementID(ctx context.Context, announcementID int64) (int64, error) { + count, err := r.client.AnnouncementRead.Query(). + Where(announcementread.AnnouncementIDEQ(announcementID)). 
+ Count(ctx) + if err != nil { + return 0, err + } + return int64(count), nil +} diff --git a/backend/internal/repository/announcement_repo.go b/backend/internal/repository/announcement_repo.go new file mode 100644 index 00000000..52029e4e --- /dev/null +++ b/backend/internal/repository/announcement_repo.go @@ -0,0 +1,194 @@ +package repository + +import ( + "context" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/announcement" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type announcementRepository struct { + client *dbent.Client +} + +func NewAnnouncementRepository(client *dbent.Client) service.AnnouncementRepository { + return &announcementRepository{client: client} +} + +func (r *announcementRepository) Create(ctx context.Context, a *service.Announcement) error { + client := clientFromContext(ctx, r.client) + builder := client.Announcement.Create(). + SetTitle(a.Title). + SetContent(a.Content). + SetStatus(a.Status). + SetTargeting(a.Targeting) + + if a.StartsAt != nil { + builder.SetStartsAt(*a.StartsAt) + } + if a.EndsAt != nil { + builder.SetEndsAt(*a.EndsAt) + } + if a.CreatedBy != nil { + builder.SetCreatedBy(*a.CreatedBy) + } + if a.UpdatedBy != nil { + builder.SetUpdatedBy(*a.UpdatedBy) + } + + created, err := builder.Save(ctx) + if err != nil { + return err + } + + applyAnnouncementEntityToService(a, created) + return nil +} + +func (r *announcementRepository) GetByID(ctx context.Context, id int64) (*service.Announcement, error) { + m, err := r.client.Announcement.Query(). + Where(announcement.IDEQ(id)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAnnouncementNotFound, nil) + } + return announcementEntityToService(m), nil +} + +func (r *announcementRepository) Update(ctx context.Context, a *service.Announcement) error { + client := clientFromContext(ctx, r.client) + builder := client.Announcement.UpdateOneID(a.ID). + SetTitle(a.Title). + SetContent(a.Content). + SetStatus(a.Status). + SetTargeting(a.Targeting) + + if a.StartsAt != nil { + builder.SetStartsAt(*a.StartsAt) + } else { + builder.ClearStartsAt() + } + if a.EndsAt != nil { + builder.SetEndsAt(*a.EndsAt) + } else { + builder.ClearEndsAt() + } + if a.CreatedBy != nil { + builder.SetCreatedBy(*a.CreatedBy) + } else { + builder.ClearCreatedBy() + } + if a.UpdatedBy != nil { + builder.SetUpdatedBy(*a.UpdatedBy) + } else { + builder.ClearUpdatedBy() + } + + updated, err := builder.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrAnnouncementNotFound, nil) + } + + a.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *announcementRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.Announcement.Delete().Where(announcement.IDEQ(id)).Exec(ctx) + return err +} + +func (r *announcementRepository) List( + ctx context.Context, + params pagination.PaginationParams, + filters service.AnnouncementListFilters, +) ([]service.Announcement, *pagination.PaginationResult, error) { + q := r.client.Announcement.Query() + + if filters.Status != "" { + q = q.Where(announcement.StatusEQ(filters.Status)) + } + if filters.Search != "" { + q = q.Where( + announcement.Or( + announcement.TitleContainsFold(filters.Search), + announcement.ContentContainsFold(filters.Search), + ), + ) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + items, err := q. 
+ Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(announcement.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + out := announcementEntitiesToService(items) + return out, paginationResultFromTotal(int64(total), params), nil +} + +func (r *announcementRepository) ListActive(ctx context.Context, now time.Time) ([]service.Announcement, error) { + q := r.client.Announcement.Query(). + Where( + announcement.StatusEQ(service.AnnouncementStatusActive), + announcement.Or(announcement.StartsAtIsNil(), announcement.StartsAtLTE(now)), + announcement.Or(announcement.EndsAtIsNil(), announcement.EndsAtGT(now)), + ). + Order(dbent.Desc(announcement.FieldID)) + + items, err := q.All(ctx) + if err != nil { + return nil, err + } + return announcementEntitiesToService(items), nil +} + +func applyAnnouncementEntityToService(dst *service.Announcement, src *dbent.Announcement) { + if dst == nil || src == nil { + return + } + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt +} + +func announcementEntityToService(m *dbent.Announcement) *service.Announcement { + if m == nil { + return nil + } + return &service.Announcement{ + ID: m.ID, + Title: m.Title, + Content: m.Content, + Status: m.Status, + Targeting: m.Targeting, + StartsAt: m.StartsAt, + EndsAt: m.EndsAt, + CreatedBy: m.CreatedBy, + UpdatedBy: m.UpdatedBy, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func announcementEntitiesToService(models []*dbent.Announcement) []service.Announcement { + out := make([]service.Announcement, 0, len(models)) + for i := range models { + if s := announcementEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} diff --git a/backend/internal/repository/api_key_cache.go b/backend/internal/repository/api_key_cache.go index 6d834b40..a1072057 100644 --- a/backend/internal/repository/api_key_cache.go +++ b/backend/internal/repository/api_key_cache.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "time" "github.com/Wei-Shaw/sub2api/internal/service" @@ -12,9 +13,10 @@ import ( ) const ( - apiKeyRateLimitKeyPrefix = "apikey:ratelimit:" - apiKeyRateLimitDuration = 24 * time.Hour - apiKeyAuthCachePrefix = "apikey:auth:" + apiKeyRateLimitKeyPrefix = "apikey:ratelimit:" + apiKeyRateLimitDuration = 24 * time.Hour + apiKeyAuthCachePrefix = "apikey:auth:" + authCacheInvalidateChannel = "auth:cache:invalidate" ) // apiKeyRateLimitKey generates the Redis key for API key creation rate limiting. 
@@ -91,3 +93,45 @@ func (c *apiKeyCache) SetAuthCache(ctx context.Context, key string, entry *servi
 func (c *apiKeyCache) DeleteAuthCache(ctx context.Context, key string) error {
 	return c.rdb.Del(ctx, apiKeyAuthCacheKey(key)).Err()
 }
+
+// PublishAuthCacheInvalidation publishes a cache invalidation message to all instances
+func (c *apiKeyCache) PublishAuthCacheInvalidation(ctx context.Context, cacheKey string) error {
+	return c.rdb.Publish(ctx, authCacheInvalidateChannel, cacheKey).Err()
+}
+
+// SubscribeAuthCacheInvalidation subscribes to cache invalidation messages
+func (c *apiKeyCache) SubscribeAuthCacheInvalidation(ctx context.Context, handler func(cacheKey string)) error {
+	pubsub := c.rdb.Subscribe(ctx, authCacheInvalidateChannel)
+
+	// Verify subscription is working
+	_, err := pubsub.Receive(ctx)
+	if err != nil {
+		_ = pubsub.Close()
+		return fmt.Errorf("subscribe to auth cache invalidation: %w", err)
+	}
+
+	go func() {
+		defer func() {
+			if err := pubsub.Close(); err != nil {
+				log.Printf("Warning: failed to close auth cache invalidation pubsub: %v", err)
+			}
+		}()
+
+		ch := pubsub.Channel()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case msg, ok := <-ch:
+				if !ok {
+					return
+				}
+				if msg != nil {
+					handler(msg.Payload)
+				}
+			}
+		}
+	}()
+
+	return nil
+}
diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go
index e9af365c..e3580a67 100644
--- a/backend/internal/repository/api_key_repo.go
+++ b/backend/internal/repository/api_key_repo.go
@@ -389,17 +389,20 @@ func userEntityToService(u *dbent.User) *service.User {
 		return nil
 	}
 	return &service.User{
-		ID:           u.ID,
-		Email:        u.Email,
-		Username:     u.Username,
-		Notes:        u.Notes,
-		PasswordHash: u.PasswordHash,
-		Role:         u.Role,
-		Balance:      u.Balance,
-		Concurrency:  u.Concurrency,
-		Status:       u.Status,
-		CreatedAt:    u.CreatedAt,
-		UpdatedAt:    u.UpdatedAt,
+		ID:                  u.ID,
+		Email:               u.Email,
+		Username:            u.Username,
+		Notes:               u.Notes,
+		PasswordHash:        u.PasswordHash,
+		Role:                u.Role,
+		Balance:             u.Balance,
+		Concurrency:         u.Concurrency,
+		Status:              u.Status,
+		TotpSecretEncrypted: u.TotpSecretEncrypted,
+		TotpEnabled:         u.TotpEnabled,
+		TotpEnabledAt:       u.TotpEnabledAt,
+		CreatedAt:           u.CreatedAt,
+		UpdatedAt:           u.UpdatedAt,
 	}
 }
diff --git a/backend/internal/repository/claude_oauth_service.go b/backend/internal/repository/claude_oauth_service.go
index 677fce52..fc0d2918 100644
--- a/backend/internal/repository/claude_oauth_service.go
+++ b/backend/internal/repository/claude_oauth_service.go
@@ -35,7 +35,9 @@ func (s *claudeOAuthService) GetOrganizationUUID(ctx context.Context, sessionKey
 	client := s.clientFactory(proxyURL)
 
 	var orgs []struct {
-		UUID string `json:"uuid"`
+		UUID      string  `json:"uuid"`
+		Name      string  `json:"name"`
+		RavenType *string `json:"raven_type"` // nil for personal, "team" for team organization
 	}
 
 	targetURL := s.baseURL + "/api/organizations"
@@ -65,7 +67,23 @@ func (s *claudeOAuthService) GetOrganizationUUID(ctx context.Context, sessionKey
 		return "", fmt.Errorf("no organizations found")
 	}
 
-	log.Printf("[OAuth] Step 1 SUCCESS - Got org UUID: %s", orgs[0].UUID)
+	// If there is only one organization, use it directly
+	if len(orgs) == 1 {
+		log.Printf("[OAuth] Step 1 SUCCESS - Single org found, UUID: %s, Name: %s", orgs[0].UUID, orgs[0].Name)
+		return orgs[0].UUID, nil
+	}
+
+	// With multiple organizations, prefer the one whose raven_type is "team"
+	for _, org := range orgs {
+		if org.RavenType != nil && *org.RavenType == "team" {
+			log.Printf("[OAuth] Step 1 SUCCESS - Selected team org, UUID: %s, Name: %s, RavenType: %s",
+				org.UUID, org.Name, *org.RavenType)
+ return org.UUID, nil + } + } + + // 如果没有 team 类型的组织,使用第一个 + log.Printf("[OAuth] Step 1 SUCCESS - No team org found, using first org, UUID: %s, Name: %s", orgs[0].UUID, orgs[0].Name) return orgs[0].UUID, nil } @@ -182,7 +200,9 @@ func (s *claudeOAuthService) ExchangeCodeForToken(ctx context.Context, code, cod resp, err := client.R(). SetContext(ctx). + SetHeader("Accept", "application/json, text/plain, */*"). SetHeader("Content-Type", "application/json"). + SetHeader("User-Agent", "axios/1.8.4"). SetBody(reqBody). SetSuccessResult(&tokenResp). Post(s.tokenURL) @@ -205,8 +225,6 @@ func (s *claudeOAuthService) ExchangeCodeForToken(ctx context.Context, code, cod func (s *claudeOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*oauth.TokenResponse, error) { client := s.clientFactory(proxyURL) - // 使用 JSON 格式(与 ExchangeCodeForToken 保持一致) - // Anthropic OAuth API 期望 JSON 格式的请求体 reqBody := map[string]any{ "grant_type": "refresh_token", "refresh_token": refreshToken, @@ -217,7 +235,9 @@ func (s *claudeOAuthService) RefreshToken(ctx context.Context, refreshToken, pro resp, err := client.R(). SetContext(ctx). + SetHeader("Accept", "application/json, text/plain, */*"). SetHeader("Content-Type", "application/json"). + SetHeader("User-Agent", "axios/1.8.4"). SetBody(reqBody). SetSuccessResult(&tokenResp). Post(s.tokenURL) diff --git a/backend/internal/repository/claude_oauth_service_test.go b/backend/internal/repository/claude_oauth_service_test.go index a7f76056..7395c6d8 100644 --- a/backend/internal/repository/claude_oauth_service_test.go +++ b/backend/internal/repository/claude_oauth_service_test.go @@ -171,7 +171,7 @@ func (s *ClaudeOAuthServiceSuite) TestGetAuthorizationCode() { s.client.baseURL = "http://in-process" s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) } - code, err := s.client.GetAuthorizationCode(context.Background(), "sess", "org-1", oauth.ScopeProfile, "cc", "st", "") + code, err := s.client.GetAuthorizationCode(context.Background(), "sess", "org-1", oauth.ScopeInference, "cc", "st", "") if tt.wantErr { require.Error(s.T(), err) diff --git a/backend/internal/repository/claude_usage_service.go b/backend/internal/repository/claude_usage_service.go index 4c87b2de..1198f472 100644 --- a/backend/internal/repository/claude_usage_service.go +++ b/backend/internal/repository/claude_usage_service.go @@ -14,37 +14,82 @@ import ( const defaultClaudeUsageURL = "https://api.anthropic.com/api/oauth/usage" +// 默认 User-Agent,与用户抓包的请求一致 +const defaultUsageUserAgent = "claude-code/2.1.7" + type claudeUsageService struct { usageURL string allowPrivateHosts bool + httpUpstream service.HTTPUpstream } -func NewClaudeUsageFetcher() service.ClaudeUsageFetcher { - return &claudeUsageService{usageURL: defaultClaudeUsageURL} +// NewClaudeUsageFetcher 创建 Claude 用量获取服务 +// httpUpstream: 可选,如果提供则支持 TLS 指纹伪装 +func NewClaudeUsageFetcher(httpUpstream service.HTTPUpstream) service.ClaudeUsageFetcher { + return &claudeUsageService{ + usageURL: defaultClaudeUsageURL, + httpUpstream: httpUpstream, + } } +// FetchUsage 简单版本,不支持 TLS 指纹(向后兼容) func (s *claudeUsageService) FetchUsage(ctx context.Context, accessToken, proxyURL string) (*service.ClaudeUsageResponse, error) { - client, err := httpclient.GetClient(httpclient.Options{ - ProxyURL: proxyURL, - Timeout: 30 * time.Second, - ValidateResolvedIP: true, - AllowPrivateHosts: s.allowPrivateHosts, + return s.FetchUsageWithOptions(ctx, &service.ClaudeUsageFetchOptions{ + AccessToken: accessToken, + ProxyURL: 
 	})
-	if err != nil {
-		client = &http.Client{Timeout: 30 * time.Second}
+}
+
+// FetchUsageWithOptions is the full variant, supporting TLS fingerprinting and a custom User-Agent.
+func (s *claudeUsageService) FetchUsageWithOptions(ctx context.Context, opts *service.ClaudeUsageFetchOptions) (*service.ClaudeUsageResponse, error) {
+	if opts == nil {
+		return nil, fmt.Errorf("options is nil")
 	}
 
+	// Build the request
 	req, err := http.NewRequestWithContext(ctx, "GET", s.usageURL, nil)
 	if err != nil {
 		return nil, fmt.Errorf("create request failed: %w", err)
 	}
 
-	req.Header.Set("Authorization", "Bearer "+accessToken)
+	// Set request headers (matching the captured traffic; leave Accept-Encoding unset so Go handles compression automatically)
+	req.Header.Set("Accept", "application/json, text/plain, */*")
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", "Bearer "+opts.AccessToken)
 	req.Header.Set("anthropic-beta", "oauth-2025-04-20")
 
-	resp, err := client.Do(req)
-	if err != nil {
-		return nil, fmt.Errorf("request failed: %w", err)
+	// Set the User-Agent (prefer the cached fingerprint, fall back to the default)
+	userAgent := defaultUsageUserAgent
+	if opts.Fingerprint != nil && opts.Fingerprint.UserAgent != "" {
+		userAgent = opts.Fingerprint.UserAgent
+	}
+	req.Header.Set("User-Agent", userAgent)
+
+	var resp *http.Response
+
+	// When TLS fingerprinting is enabled and an HTTPUpstream is available, use DoWithTLS
+	if opts.EnableTLSFingerprint && s.httpUpstream != nil {
+		// Passing 0 for accountConcurrency keeps the default pool settings; usage requests need no special concurrency
+		resp, err = s.httpUpstream.DoWithTLS(req, opts.ProxyURL, opts.AccountID, 0, true)
+		if err != nil {
+			return nil, fmt.Errorf("request with TLS fingerprint failed: %w", err)
+		}
+	} else {
+		// TLS fingerprinting disabled: use a plain HTTP client
+		client, err := httpclient.GetClient(httpclient.Options{
+			ProxyURL:           opts.ProxyURL,
+			Timeout:            30 * time.Second,
+			ValidateResolvedIP: true,
+			AllowPrivateHosts:  s.allowPrivateHosts,
+		})
+		if err != nil {
+			client = &http.Client{Timeout: 30 * time.Second}
+		}
+
+		resp, err = client.Do(req)
+		if err != nil {
+			return nil, fmt.Errorf("request failed: %w", err)
+		}
 	}
 	defer func() { _ = resp.Body.Close() }()
diff --git a/backend/internal/repository/dashboard_aggregation_repo.go b/backend/internal/repository/dashboard_aggregation_repo.go
index 3543e061..59bbd6a3 100644
--- a/backend/internal/repository/dashboard_aggregation_repo.go
+++ b/backend/internal/repository/dashboard_aggregation_repo.go
@@ -77,6 +77,75 @@ func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, sta
 	return nil
 }
 
+func (r *dashboardAggregationRepository) RecomputeRange(ctx context.Context, start, end time.Time) error {
+	if r == nil || r.sql == nil {
+		return nil
+	}
+	loc := timezone.Location()
+	startLocal := start.In(loc)
+	endLocal := end.In(loc)
+	if !endLocal.After(startLocal) {
+		return nil
+	}
+
+	hourStart := startLocal.Truncate(time.Hour)
+	hourEnd := endLocal.Truncate(time.Hour)
+	if endLocal.After(hourEnd) {
+		hourEnd = hourEnd.Add(time.Hour)
+	}
+
+	dayStart := truncateToDay(startLocal)
+	dayEnd := truncateToDay(endLocal)
+	if endLocal.After(dayEnd) {
+		dayEnd = dayEnd.Add(24 * time.Hour)
+	}
+
+	// Prefer a transaction for consistency across the range (degrade to non-transactional execution when the executor is not a *sql.DB).
+	if db, ok := r.sql.(*sql.DB); ok {
+		tx, err := db.BeginTx(ctx, nil)
+		if err != nil {
+			return err
+		}
+		txRepo := newDashboardAggregationRepositoryWithSQL(tx)
+		if err := txRepo.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd); err != nil {
+			_ = tx.Rollback()
+			return err
+		}
+		return tx.Commit()
+	}
+	return r.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd)
+}
+
+func (r *dashboardAggregationRepository) recomputeRangeInTx(ctx context.Context, hourStart, hourEnd, dayStart, dayEnd time.Time) error {
+	// Clear the buckets in range first, then rebuild (incremental inserts alone cannot roll back metrics such as active-user counts).
+	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil {
+		return err
+	}
+	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil {
+		return err
+	}
+	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil {
+		return err
+	}
+	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil {
+		return err
+	}
+
+	if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) {
 	var ts time.Time
 	query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1"
diff --git a/backend/internal/repository/email_cache.go b/backend/internal/repository/email_cache.go
index e00e35dd..8f2b8eca 100644
--- a/backend/internal/repository/email_cache.go
+++ b/backend/internal/repository/email_cache.go
@@ -9,13 +9,27 @@ import (
 	"github.com/redis/go-redis/v9"
 )
 
-const verifyCodeKeyPrefix = "verify_code:"
+const (
+	verifyCodeKeyPrefix          = "verify_code:"
+	passwordResetKeyPrefix       = "password_reset:"
+	passwordResetSentAtKeyPrefix = "password_reset_sent:"
+)
 
 // verifyCodeKey generates the Redis key for email verification code.
 func verifyCodeKey(email string) string {
 	return verifyCodeKeyPrefix + email
 }
 
+// passwordResetKey generates the Redis key for password reset token.
+func passwordResetKey(email string) string {
+	return passwordResetKeyPrefix + email
+}
+
+// passwordResetSentAtKey generates the Redis key for password reset email sent timestamp.
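+// Together with passwordResetKey this forms a token-plus-cooldown pair: one key
+// stores the reset token itself, the other is a short-lived "email already sent"
+// marker. A minimal caller sketch (hypothetical service-layer code, not part of
+// this patch; ErrTooManyRequests and the TTLs are assumed):
+//
+//	if cache.IsPasswordResetEmailInCooldown(ctx, email) {
+//		return ErrTooManyRequests
+//	}
+//	if err := cache.SetPasswordResetToken(ctx, email, data, 30*time.Minute); err != nil {
+//		return err
+//	}
+//	return cache.SetPasswordResetEmailCooldown(ctx, email, time.Minute)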
+func passwordResetSentAtKey(email string) string {
+	return passwordResetSentAtKeyPrefix + email
+}
+
 type emailCache struct {
 	rdb *redis.Client
 }
@@ -50,3 +64,45 @@ func (c *emailCache) DeleteVerificationCode(ctx context.Context, email string) e
 	key := verifyCodeKey(email)
 	return c.rdb.Del(ctx, key).Err()
 }
+
+// Password reset token methods
+
+func (c *emailCache) GetPasswordResetToken(ctx context.Context, email string) (*service.PasswordResetTokenData, error) {
+	key := passwordResetKey(email)
+	val, err := c.rdb.Get(ctx, key).Result()
+	if err != nil {
+		return nil, err
+	}
+	var data service.PasswordResetTokenData
+	if err := json.Unmarshal([]byte(val), &data); err != nil {
+		return nil, err
+	}
+	return &data, nil
+}
+
+func (c *emailCache) SetPasswordResetToken(ctx context.Context, email string, data *service.PasswordResetTokenData, ttl time.Duration) error {
+	key := passwordResetKey(email)
+	val, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	return c.rdb.Set(ctx, key, val, ttl).Err()
+}
+
+func (c *emailCache) DeletePasswordResetToken(ctx context.Context, email string) error {
+	key := passwordResetKey(email)
+	return c.rdb.Del(ctx, key).Err()
+}
+
+// Password reset email cooldown methods
+
+func (c *emailCache) IsPasswordResetEmailInCooldown(ctx context.Context, email string) bool {
+	key := passwordResetSentAtKey(email)
+	exists, err := c.rdb.Exists(ctx, key).Result()
+	return err == nil && exists > 0
+}
+
+func (c *emailCache) SetPasswordResetEmailCooldown(ctx context.Context, email string, ttl time.Duration) error {
+	key := passwordResetSentAtKey(email)
+	return c.rdb.Set(ctx, key, "1", ttl).Err()
+}
diff --git a/backend/internal/repository/ent.go b/backend/internal/repository/ent.go
index 8005f114..d7d574e8 100644
--- a/backend/internal/repository/ent.go
+++ b/backend/internal/repository/ent.go
@@ -65,5 +65,18 @@ func InitEnt(cfg *config.Config) (*ent.Client, *sql.DB, error) {
 	// 创建 Ent 客户端,绑定到已配置的数据库驱动。
 	client := ent.NewClient(ent.Driver(drv))
+
+	// SIMPLE mode: seed each platform's default groups at startup.
+	// - anthropic/openai/gemini: ensure a "-default" group exists
+	// - antigravity: only requires >= 2 non-soft-deleted groups (used for mixed claude/gemini scheduling)
+	if cfg.RunMode == config.RunModeSimple {
+		seedCtx, seedCancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer seedCancel()
+		if err := ensureSimpleModeDefaultGroups(seedCtx, client); err != nil {
+			_ = client.Close()
+			return nil, nil, err
+		}
+	}
+
 	return client, drv.DB(), nil
 }
diff --git a/backend/internal/repository/gateway_cache.go b/backend/internal/repository/gateway_cache.go
index 40a9ad05..58291b66 100644
--- a/backend/internal/repository/gateway_cache.go
+++ b/backend/internal/repository/gateway_cache.go
@@ -39,3 +39,15 @@ func (c *gatewayCache) RefreshSessionTTL(ctx context.Context, groupID int64, ses
 	key := buildSessionKey(groupID, sessionHash)
 	return c.rdb.Expire(ctx, key, ttl).Err()
 }
+
+// DeleteSessionAccountID removes the sticky session binding for the given session.
+// Called when the bound account becomes unavailable (e.g., error status, disabled,
+// or unschedulable), allowing subsequent requests to select a new available account.
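+//
+// Illustrative call site (assumed scheduler-side logic, not part of this patch;
+// isSchedulable is a hypothetical helper):
+//
+//	accountID, err := cache.GetSessionAccountID(ctx, groupID, sessionHash)
+//	if err == nil && !isSchedulable(accountID) {
+//		_ = cache.DeleteSessionAccountID(ctx, groupID, sessionHash)
+//	}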
+func (c *gatewayCache) DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error { + key := buildSessionKey(groupID, sessionHash) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/gateway_cache_integration_test.go b/backend/internal/repository/gateway_cache_integration_test.go index d8885bca..0eebc33f 100644 --- a/backend/internal/repository/gateway_cache_integration_test.go +++ b/backend/internal/repository/gateway_cache_integration_test.go @@ -78,6 +78,19 @@ func (s *GatewayCacheSuite) TestRefreshSessionTTL_MissingKey() { require.NoError(s.T(), err, "RefreshSessionTTL on missing key should not error") } +func (s *GatewayCacheSuite) TestDeleteSessionAccountID() { + sessionID := "openai:s4" + accountID := int64(102) + groupID := int64(1) + sessionTTL := 1 * time.Minute + + require.NoError(s.T(), s.cache.SetSessionAccountID(s.ctx, groupID, sessionID, accountID, sessionTTL), "SetSessionAccountID") + require.NoError(s.T(), s.cache.DeleteSessionAccountID(s.ctx, groupID, sessionID), "DeleteSessionAccountID") + + _, err := s.cache.GetSessionAccountID(s.ctx, groupID, sessionID) + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil after delete") +} + func (s *GatewayCacheSuite) TestGetSessionAccountID_CorruptedValue() { sessionID := "corrupted" groupID := int64(1) diff --git a/backend/internal/repository/gateway_routing_integration_test.go b/backend/internal/repository/gateway_routing_integration_test.go index 5566d2e9..77591fe3 100644 --- a/backend/internal/repository/gateway_routing_integration_test.go +++ b/backend/internal/repository/gateway_routing_integration_test.go @@ -24,7 +24,7 @@ func (s *GatewayRoutingSuite) SetupTest() { s.ctx = context.Background() tx := testEntTx(s.T()) s.client = tx.Client() - s.accountRepo = newAccountRepositoryWithSQL(s.client, tx) + s.accountRepo = newAccountRepositoryWithSQL(s.client, tx, nil) } func TestGatewayRoutingSuite(t *testing.T) { diff --git a/backend/internal/repository/http_upstream.go b/backend/internal/repository/http_upstream.go index feb32541..b0f15f19 100644 --- a/backend/internal/repository/http_upstream.go +++ b/backend/internal/repository/http_upstream.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -14,6 +15,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/proxyutil" + "github.com/Wei-Shaw/sub2api/internal/pkg/tlsfingerprint" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" ) @@ -150,6 +152,172 @@ func (s *httpUpstreamService) Do(req *http.Request, proxyURL string, accountID i return resp, nil } +// DoWithTLS 执行带 TLS 指纹伪装的 HTTP 请求 +// 根据 enableTLSFingerprint 参数决定是否使用 TLS 指纹 +// +// 参数: +// - req: HTTP 请求对象 +// - proxyURL: 代理地址,空字符串表示直连 +// - accountID: 账户 ID,用于账户级隔离和 TLS 指纹模板选择 +// - accountConcurrency: 账户并发限制,用于动态调整连接池大小 +// - enableTLSFingerprint: 是否启用 TLS 指纹伪装 +// +// TLS 指纹说明: +// - 当 enableTLSFingerprint=true 时,使用 utls 库模拟 Claude CLI 的 TLS 指纹 +// - 指纹模板根据 accountID % len(profiles) 自动选择 +// - 支持直连、HTTP/HTTPS 代理、SOCKS5 代理三种场景 +func (s *httpUpstreamService) DoWithTLS(req *http.Request, proxyURL string, accountID int64, accountConcurrency int, enableTLSFingerprint bool) (*http.Response, error) { + // 如果未启用 TLS 指纹,直接使用标准请求路径 + if !enableTLSFingerprint { + return s.Do(req, proxyURL, accountID, accountConcurrency) + } + + // TLS 指纹已启用,记录调试日志 + targetHost := "" + if req != nil && req.URL != nil { + targetHost = req.URL.Host 
+ } + proxyInfo := "direct" + if proxyURL != "" { + proxyInfo = proxyURL + } + slog.Debug("tls_fingerprint_enabled", "account_id", accountID, "target", targetHost, "proxy", proxyInfo) + + if err := s.validateRequestHost(req); err != nil { + return nil, err + } + + // 获取 TLS 指纹 Profile + registry := tlsfingerprint.GlobalRegistry() + profile := registry.GetProfileByAccountID(accountID) + if profile == nil { + // 如果获取不到 profile,回退到普通请求 + slog.Debug("tls_fingerprint_no_profile", "account_id", accountID, "fallback", "standard_request") + return s.Do(req, proxyURL, accountID, accountConcurrency) + } + + slog.Debug("tls_fingerprint_using_profile", "account_id", accountID, "profile", profile.Name, "grease", profile.EnableGREASE) + + // 获取或创建带 TLS 指纹的客户端 + entry, err := s.acquireClientWithTLS(proxyURL, accountID, accountConcurrency, profile) + if err != nil { + slog.Debug("tls_fingerprint_acquire_client_failed", "account_id", accountID, "error", err) + return nil, err + } + + // 执行请求 + resp, err := entry.client.Do(req) + if err != nil { + // 请求失败,立即减少计数 + atomic.AddInt64(&entry.inFlight, -1) + atomic.StoreInt64(&entry.lastUsed, time.Now().UnixNano()) + slog.Debug("tls_fingerprint_request_failed", "account_id", accountID, "error", err) + return nil, err + } + + slog.Debug("tls_fingerprint_request_success", "account_id", accountID, "status", resp.StatusCode) + + // 包装响应体,在关闭时自动减少计数并更新时间戳 + resp.Body = wrapTrackedBody(resp.Body, func() { + atomic.AddInt64(&entry.inFlight, -1) + atomic.StoreInt64(&entry.lastUsed, time.Now().UnixNano()) + }) + + return resp, nil +} + +// acquireClientWithTLS 获取或创建带 TLS 指纹的客户端 +func (s *httpUpstreamService) acquireClientWithTLS(proxyURL string, accountID int64, accountConcurrency int, profile *tlsfingerprint.Profile) (*upstreamClientEntry, error) { + return s.getClientEntryWithTLS(proxyURL, accountID, accountConcurrency, profile, true, true) +} + +// getClientEntryWithTLS 获取或创建带 TLS 指纹的客户端条目 +// TLS 指纹客户端使用独立的缓存键,与普通客户端隔离 +func (s *httpUpstreamService) getClientEntryWithTLS(proxyURL string, accountID int64, accountConcurrency int, profile *tlsfingerprint.Profile, markInFlight bool, enforceLimit bool) (*upstreamClientEntry, error) { + isolation := s.getIsolationMode() + proxyKey, parsedProxy := normalizeProxyURL(proxyURL) + // TLS 指纹客户端使用独立的缓存键,加 "tls:" 前缀 + cacheKey := "tls:" + buildCacheKey(isolation, proxyKey, accountID) + poolKey := s.buildPoolKey(isolation, accountConcurrency) + ":tls" + + now := time.Now() + nowUnix := now.UnixNano() + + // 读锁快速路径 + s.mu.RLock() + if entry, ok := s.clients[cacheKey]; ok && s.shouldReuseEntry(entry, isolation, proxyKey, poolKey) { + atomic.StoreInt64(&entry.lastUsed, nowUnix) + if markInFlight { + atomic.AddInt64(&entry.inFlight, 1) + } + s.mu.RUnlock() + slog.Debug("tls_fingerprint_reusing_client", "account_id", accountID, "cache_key", cacheKey) + return entry, nil + } + s.mu.RUnlock() + + // 写锁慢路径 + s.mu.Lock() + if entry, ok := s.clients[cacheKey]; ok { + if s.shouldReuseEntry(entry, isolation, proxyKey, poolKey) { + atomic.StoreInt64(&entry.lastUsed, nowUnix) + if markInFlight { + atomic.AddInt64(&entry.inFlight, 1) + } + s.mu.Unlock() + slog.Debug("tls_fingerprint_reusing_client", "account_id", accountID, "cache_key", cacheKey) + return entry, nil + } + slog.Debug("tls_fingerprint_evicting_stale_client", + "account_id", accountID, + "cache_key", cacheKey, + "proxy_changed", entry.proxyKey != proxyKey, + "pool_changed", entry.poolKey != poolKey) + s.removeClientLocked(cacheKey, entry) + } + + // 超出缓存上限时尝试淘汰 + if enforceLimit && 
s.maxUpstreamClients() > 0 { + s.evictIdleLocked(now) + if len(s.clients) >= s.maxUpstreamClients() { + if !s.evictOldestIdleLocked() { + s.mu.Unlock() + return nil, errUpstreamClientLimitReached + } + } + } + + // 创建带 TLS 指纹的 Transport + slog.Debug("tls_fingerprint_creating_new_client", "account_id", accountID, "cache_key", cacheKey, "proxy", proxyKey) + settings := s.resolvePoolSettings(isolation, accountConcurrency) + transport, err := buildUpstreamTransportWithTLSFingerprint(settings, parsedProxy, profile) + if err != nil { + s.mu.Unlock() + return nil, fmt.Errorf("build TLS fingerprint transport: %w", err) + } + + client := &http.Client{Transport: transport} + if s.shouldValidateResolvedIP() { + client.CheckRedirect = s.redirectChecker + } + + entry := &upstreamClientEntry{ + client: client, + proxyKey: proxyKey, + poolKey: poolKey, + } + atomic.StoreInt64(&entry.lastUsed, nowUnix) + if markInFlight { + atomic.StoreInt64(&entry.inFlight, 1) + } + s.clients[cacheKey] = entry + + s.evictIdleLocked(now) + s.evictOverLimitLocked() + s.mu.Unlock() + return entry, nil +} + func (s *httpUpstreamService) shouldValidateResolvedIP() bool { if s.cfg == nil { return false @@ -618,6 +786,64 @@ func buildUpstreamTransport(settings poolSettings, proxyURL *url.URL) (*http.Tra return transport, nil } +// buildUpstreamTransportWithTLSFingerprint 构建带 TLS 指纹伪装的 Transport +// 使用 utls 库模拟 Claude CLI 的 TLS 指纹 +// +// 参数: +// - settings: 连接池配置 +// - proxyURL: 代理 URL(nil 表示直连) +// - profile: TLS 指纹配置 +// +// 返回: +// - *http.Transport: 配置好的 Transport 实例 +// - error: 配置错误 +// +// 代理类型处理: +// - nil/空: 直连,使用 TLSFingerprintDialer +// - http/https: HTTP 代理,使用 HTTPProxyDialer(CONNECT 隧道 + utls 握手) +// - socks5: SOCKS5 代理,使用 SOCKS5ProxyDialer(SOCKS5 隧道 + utls 握手) +func buildUpstreamTransportWithTLSFingerprint(settings poolSettings, proxyURL *url.URL, profile *tlsfingerprint.Profile) (*http.Transport, error) { + transport := &http.Transport{ + MaxIdleConns: settings.maxIdleConns, + MaxIdleConnsPerHost: settings.maxIdleConnsPerHost, + MaxConnsPerHost: settings.maxConnsPerHost, + IdleConnTimeout: settings.idleConnTimeout, + ResponseHeaderTimeout: settings.responseHeaderTimeout, + // 禁用默认的 TLS,我们使用自定义的 DialTLSContext + ForceAttemptHTTP2: false, + } + + // 根据代理类型选择合适的 TLS 指纹 Dialer + if proxyURL == nil { + // 直连:使用 TLSFingerprintDialer + slog.Debug("tls_fingerprint_transport_direct") + dialer := tlsfingerprint.NewDialer(profile, nil) + transport.DialTLSContext = dialer.DialTLSContext + } else { + scheme := strings.ToLower(proxyURL.Scheme) + switch scheme { + case "socks5", "socks5h": + // SOCKS5 代理:使用 SOCKS5ProxyDialer + slog.Debug("tls_fingerprint_transport_socks5", "proxy", proxyURL.Host) + socks5Dialer := tlsfingerprint.NewSOCKS5ProxyDialer(profile, proxyURL) + transport.DialTLSContext = socks5Dialer.DialTLSContext + case "http", "https": + // HTTP/HTTPS 代理:使用 HTTPProxyDialer(CONNECT 隧道) + slog.Debug("tls_fingerprint_transport_http_connect", "proxy", proxyURL.Host) + httpDialer := tlsfingerprint.NewHTTPProxyDialer(profile, proxyURL) + transport.DialTLSContext = httpDialer.DialTLSContext + default: + // 未知代理类型,回退到普通代理配置(无 TLS 指纹) + slog.Debug("tls_fingerprint_transport_unknown_scheme_fallback", "scheme", scheme) + if err := proxyutil.ConfigureTransportProxy(transport, proxyURL); err != nil { + return nil, err + } + } + } + + return transport, nil +} + // trackedBody 带跟踪功能的响应体包装器 // 在 Close 时执行回调,用于更新请求计数 type trackedBody struct { diff --git a/backend/internal/repository/identity_cache.go 
b/backend/internal/repository/identity_cache.go index d28477b7..c4986547 100644 --- a/backend/internal/repository/identity_cache.go +++ b/backend/internal/repository/identity_cache.go @@ -11,8 +11,10 @@ import ( ) const ( - fingerprintKeyPrefix = "fingerprint:" - fingerprintTTL = 24 * time.Hour + fingerprintKeyPrefix = "fingerprint:" + fingerprintTTL = 24 * time.Hour + maskedSessionKeyPrefix = "masked_session:" + maskedSessionTTL = 15 * time.Minute ) // fingerprintKey generates the Redis key for account fingerprint cache. @@ -20,6 +22,11 @@ func fingerprintKey(accountID int64) string { return fmt.Sprintf("%s%d", fingerprintKeyPrefix, accountID) } +// maskedSessionKey generates the Redis key for masked session ID cache. +func maskedSessionKey(accountID int64) string { + return fmt.Sprintf("%s%d", maskedSessionKeyPrefix, accountID) +} + type identityCache struct { rdb *redis.Client } @@ -49,3 +56,20 @@ func (c *identityCache) SetFingerprint(ctx context.Context, accountID int64, fp } return c.rdb.Set(ctx, key, val, fingerprintTTL).Err() } + +func (c *identityCache) GetMaskedSessionID(ctx context.Context, accountID int64) (string, error) { + key := maskedSessionKey(accountID) + val, err := c.rdb.Get(ctx, key).Result() + if err != nil { + if err == redis.Nil { + return "", nil + } + return "", err + } + return val, nil +} + +func (c *identityCache) SetMaskedSessionID(ctx context.Context, accountID int64, sessionID string) error { + key := maskedSessionKey(accountID) + return c.rdb.Set(ctx, key, sessionID, maskedSessionTTL).Err() +} diff --git a/backend/internal/repository/openai_oauth_service.go b/backend/internal/repository/openai_oauth_service.go index 07d57410..394d3a1a 100644 --- a/backend/internal/repository/openai_oauth_service.go +++ b/backend/internal/repository/openai_oauth_service.go @@ -2,10 +2,11 @@ package repository import ( "context" - "fmt" + "net/http" "net/url" "time" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/imroc/req/v3" @@ -38,16 +39,17 @@ func (s *openaiOAuthService) ExchangeCode(ctx context.Context, code, codeVerifie resp, err := client.R(). SetContext(ctx). + SetHeader("User-Agent", "codex-cli/0.91.0"). SetFormDataFromValues(formData). SetSuccessResult(&tokenResp). Post(s.tokenURL) if err != nil { - return nil, fmt.Errorf("request failed: %w", err) + return nil, infraerrors.Newf(http.StatusBadGateway, "OPENAI_OAUTH_REQUEST_FAILED", "request failed: %v", err) } if !resp.IsSuccessState() { - return nil, fmt.Errorf("token exchange failed: status %d, body: %s", resp.StatusCode, resp.String()) + return nil, infraerrors.Newf(http.StatusBadGateway, "OPENAI_OAUTH_TOKEN_EXCHANGE_FAILED", "token exchange failed: status %d, body: %s", resp.StatusCode, resp.String()) } return &tokenResp, nil @@ -66,16 +68,17 @@ func (s *openaiOAuthService) RefreshToken(ctx context.Context, refreshToken, pro resp, err := client.R(). SetContext(ctx). + SetHeader("User-Agent", "codex-cli/0.91.0"). SetFormDataFromValues(formData). SetSuccessResult(&tokenResp). 
Post(s.tokenURL) if err != nil { - return nil, fmt.Errorf("request failed: %w", err) + return nil, infraerrors.Newf(http.StatusBadGateway, "OPENAI_OAUTH_REQUEST_FAILED", "request failed: %v", err) } if !resp.IsSuccessState() { - return nil, fmt.Errorf("token refresh failed: status %d, body: %s", resp.StatusCode, resp.String()) + return nil, infraerrors.Newf(http.StatusBadGateway, "OPENAI_OAUTH_TOKEN_REFRESH_FAILED", "token refresh failed: status %d, body: %s", resp.StatusCode, resp.String()) } return &tokenResp, nil @@ -84,6 +87,6 @@ func (s *openaiOAuthService) RefreshToken(ctx context.Context, refreshToken, pro func createOpenAIReqClient(proxyURL string) *req.Client { return getSharedReqClient(reqClientOptions{ ProxyURL: proxyURL, - Timeout: 60 * time.Second, + Timeout: 120 * time.Second, }) } diff --git a/backend/internal/repository/openai_oauth_service_test.go b/backend/internal/repository/openai_oauth_service_test.go index 51142306..f9df08c8 100644 --- a/backend/internal/repository/openai_oauth_service_test.go +++ b/backend/internal/repository/openai_oauth_service_test.go @@ -244,6 +244,13 @@ func (s *OpenAIOAuthServiceSuite) TestRefreshToken_NonSuccessStatus() { require.ErrorContains(s.T(), err, "status 401") } +func TestNewOpenAIOAuthClient_DefaultTokenURL(t *testing.T) { + client := NewOpenAIOAuthClient() + svc, ok := client.(*openaiOAuthService) + require.True(t, ok) + require.Equal(t, openai.TokenURL, svc.tokenURL) +} + func TestOpenAIOAuthServiceSuite(t *testing.T) { suite.Run(t, new(OpenAIOAuthServiceSuite)) } diff --git a/backend/internal/repository/ops_repo.go b/backend/internal/repository/ops_repo.go index 613c5bd5..b04154b7 100644 --- a/backend/internal/repository/ops_repo.go +++ b/backend/internal/repository/ops_repo.go @@ -992,7 +992,8 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { } // View filter: errors vs excluded vs all. - // Excluded = upstream 429/529 and business-limited (quota/concurrency/billing) errors. + // Excluded = business-limited errors (quota/concurrency/billing). + // Upstream 429/529 are included in errors view to match SLA calculation. 
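+	// Net effect per view (illustrative; mirrors the clauses appended below):
+	//   errors (default): COALESCE(is_business_limited,false) = false
+	//   excluded:         COALESCE(is_business_limited,false) = true
+	//   all:              no additional clause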
view := "" if filter != nil { view = strings.ToLower(strings.TrimSpace(filter.View)) @@ -1000,15 +1001,13 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { switch view { case "", "errors": clauses = append(clauses, "COALESCE(is_business_limited,false) = false") - clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") case "excluded": - clauses = append(clauses, "(COALESCE(is_business_limited,false) = true OR COALESCE(upstream_status_code, status_code, 0) IN (429, 529))") + clauses = append(clauses, "COALESCE(is_business_limited,false) = true") case "all": // no-op default: // treat unknown as default 'errors' clauses = append(clauses, "COALESCE(is_business_limited,false) = false") - clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") } if len(filter.StatusCodes) > 0 { args = append(args, pq.Array(filter.StatusCodes)) diff --git a/backend/internal/repository/redis.go b/backend/internal/repository/redis.go index f3606ad9..2b4ee4e6 100644 --- a/backend/internal/repository/redis.go +++ b/backend/internal/repository/redis.go @@ -1,6 +1,7 @@ package repository import ( + "crypto/tls" "time" "github.com/Wei-Shaw/sub2api/internal/config" @@ -26,7 +27,7 @@ func InitRedis(cfg *config.Config) *redis.Client { // buildRedisOptions 构建 Redis 连接选项 // 从配置文件读取连接池和超时参数,支持生产环境调优 func buildRedisOptions(cfg *config.Config) *redis.Options { - return &redis.Options{ + opts := &redis.Options{ Addr: cfg.Redis.Address(), Password: cfg.Redis.Password, DB: cfg.Redis.DB, @@ -36,4 +37,13 @@ func buildRedisOptions(cfg *config.Config) *redis.Options { PoolSize: cfg.Redis.PoolSize, // 连接池大小 MinIdleConns: cfg.Redis.MinIdleConns, // 最小空闲连接 } + + if cfg.Redis.EnableTLS { + opts.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: cfg.Redis.Host, + } + } + + return opts } diff --git a/backend/internal/repository/redis_test.go b/backend/internal/repository/redis_test.go index 756a63dc..7cb31002 100644 --- a/backend/internal/repository/redis_test.go +++ b/backend/internal/repository/redis_test.go @@ -32,4 +32,16 @@ func TestBuildRedisOptions(t *testing.T) { require.Equal(t, 4*time.Second, opts.WriteTimeout) require.Equal(t, 100, opts.PoolSize) require.Equal(t, 10, opts.MinIdleConns) + require.Nil(t, opts.TLSConfig) + + // Test case with TLS enabled + cfgTLS := &config.Config{ + Redis: config.RedisConfig{ + Host: "localhost", + EnableTLS: true, + }, + } + optsTLS := buildRedisOptions(cfgTLS) + require.NotNil(t, optsTLS.TLSConfig) + require.Equal(t, "localhost", optsTLS.TLSConfig.ServerName) } diff --git a/backend/internal/repository/req_client_pool.go b/backend/internal/repository/req_client_pool.go index b23462a4..af71a7ee 100644 --- a/backend/internal/repository/req_client_pool.go +++ b/backend/internal/repository/req_client_pool.go @@ -14,6 +14,7 @@ type reqClientOptions struct { ProxyURL string // 代理 URL(支持 http/https/socks5) Timeout time.Duration // 请求超时时间 Impersonate bool // 是否模拟 Chrome 浏览器指纹 + ForceHTTP2 bool // 是否强制使用 HTTP/2 } // sharedReqClients 存储按配置参数缓存的 req 客户端实例 @@ -41,6 +42,9 @@ func getSharedReqClient(opts reqClientOptions) *req.Client { } client := req.C().SetTimeout(opts.Timeout) + if opts.ForceHTTP2 { + client = client.EnableForceHTTP2() + } if opts.Impersonate { client = client.ImpersonateChrome() } @@ -56,9 +60,10 @@ func getSharedReqClient(opts reqClientOptions) *req.Client { } func buildReqClientKey(opts reqClientOptions) string { - return fmt.Sprintf("%s|%s|%t", + return 
fmt.Sprintf("%s|%s|%t|%t", strings.TrimSpace(opts.ProxyURL), opts.Timeout.String(), opts.Impersonate, + opts.ForceHTTP2, ) } diff --git a/backend/internal/repository/req_client_pool_test.go b/backend/internal/repository/req_client_pool_test.go new file mode 100644 index 00000000..904ed4d6 --- /dev/null +++ b/backend/internal/repository/req_client_pool_test.go @@ -0,0 +1,90 @@ +package repository + +import ( + "reflect" + "sync" + "testing" + "time" + "unsafe" + + "github.com/imroc/req/v3" + "github.com/stretchr/testify/require" +) + +func forceHTTPVersion(t *testing.T, client *req.Client) string { + t.Helper() + transport := client.GetTransport() + field := reflect.ValueOf(transport).Elem().FieldByName("forceHttpVersion") + require.True(t, field.IsValid(), "forceHttpVersion field not found") + require.True(t, field.CanAddr(), "forceHttpVersion field not addressable") + return reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().String() +} + +func TestGetSharedReqClient_ForceHTTP2SeparatesCache(t *testing.T) { + sharedReqClients = sync.Map{} + base := reqClientOptions{ + ProxyURL: "http://proxy.local:8080", + Timeout: time.Second, + } + clientDefault := getSharedReqClient(base) + + force := base + force.ForceHTTP2 = true + clientForce := getSharedReqClient(force) + + require.NotSame(t, clientDefault, clientForce) + require.NotEqual(t, buildReqClientKey(base), buildReqClientKey(force)) +} + +func TestGetSharedReqClient_ReuseCachedClient(t *testing.T) { + sharedReqClients = sync.Map{} + opts := reqClientOptions{ + ProxyURL: "http://proxy.local:8080", + Timeout: 2 * time.Second, + } + first := getSharedReqClient(opts) + second := getSharedReqClient(opts) + require.Same(t, first, second) +} + +func TestGetSharedReqClient_IgnoresNonClientCache(t *testing.T) { + sharedReqClients = sync.Map{} + opts := reqClientOptions{ + ProxyURL: " http://proxy.local:8080 ", + Timeout: 3 * time.Second, + } + key := buildReqClientKey(opts) + sharedReqClients.Store(key, "invalid") + + client := getSharedReqClient(opts) + + require.NotNil(t, client) + loaded, ok := sharedReqClients.Load(key) + require.True(t, ok) + require.IsType(t, "invalid", loaded) +} + +func TestGetSharedReqClient_ImpersonateAndProxy(t *testing.T) { + sharedReqClients = sync.Map{} + opts := reqClientOptions{ + ProxyURL: " http://proxy.local:8080 ", + Timeout: 4 * time.Second, + Impersonate: true, + } + client := getSharedReqClient(opts) + + require.NotNil(t, client) + require.Equal(t, "http://proxy.local:8080|4s|true|false", buildReqClientKey(opts)) +} + +func TestCreateOpenAIReqClient_Timeout120Seconds(t *testing.T) { + sharedReqClients = sync.Map{} + client := createOpenAIReqClient("http://proxy.local:8080") + require.Equal(t, 120*time.Second, client.GetClient().Timeout) +} + +func TestCreateGeminiReqClient_ForceHTTP2Disabled(t *testing.T) { + sharedReqClients = sync.Map{} + client := createGeminiReqClient("http://proxy.local:8080") + require.Equal(t, "", forceHTTPVersion(t, client)) +} diff --git a/backend/internal/repository/scheduler_cache.go b/backend/internal/repository/scheduler_cache.go index 13b22107..4f447e4f 100644 --- a/backend/internal/repository/scheduler_cache.go +++ b/backend/internal/repository/scheduler_cache.go @@ -58,7 +58,9 @@ func (c *schedulerCache) GetSnapshot(ctx context.Context, bucket service.Schedul return nil, false, err } if len(ids) == 0 { - return []*service.Account{}, true, nil + // 空快照视为缓存未命中,触发数据库回退查询 + // 这解决了新分组创建后立即绑定账号时的竞态条件问题 + return nil, false, nil } keys := make([]string, 0, 
len(ids)) diff --git a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go index e442a125..a88b74ef 100644 --- a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go +++ b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go @@ -19,7 +19,7 @@ func TestSchedulerSnapshotOutboxReplay(t *testing.T) { _, _ = integrationDB.ExecContext(ctx, "TRUNCATE scheduler_outbox") - accountRepo := newAccountRepositoryWithSQL(client, integrationDB) + accountRepo := newAccountRepositoryWithSQL(client, integrationDB, nil) outboxRepo := NewSchedulerOutboxRepository(integrationDB) cache := NewSchedulerCache(rdb) diff --git a/backend/internal/repository/session_limit_cache.go b/backend/internal/repository/session_limit_cache.go index 16f2a69c..3dc89f87 100644 --- a/backend/internal/repository/session_limit_cache.go +++ b/backend/internal/repository/session_limit_cache.go @@ -217,7 +217,7 @@ func (c *sessionLimitCache) GetActiveSessionCount(ctx context.Context, accountID } // GetActiveSessionCountBatch 批量获取多个账号的活跃会话数 -func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { +func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64, idleTimeouts map[int64]time.Duration) (map[int64]int, error) { if len(accountIDs) == 0 { return make(map[int64]int), nil } @@ -226,11 +226,18 @@ func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, acco // 使用 pipeline 批量执行 pipe := c.rdb.Pipeline() - idleTimeoutSeconds := int(c.defaultIdleTimeout.Seconds()) cmds := make(map[int64]*redis.Cmd, len(accountIDs)) for _, accountID := range accountIDs { key := sessionLimitKey(accountID) + // 使用各账号自己的 idleTimeout,如果没有则用默认值 + idleTimeout := c.defaultIdleTimeout + if idleTimeouts != nil { + if t, ok := idleTimeouts[accountID]; ok && t > 0 { + idleTimeout = t + } + } + idleTimeoutSeconds := int(idleTimeout.Seconds()) cmds[accountID] = getActiveSessionCountScript.Run(ctx, pipe, []string{key}, idleTimeoutSeconds) } diff --git a/backend/internal/repository/simple_mode_default_groups.go b/backend/internal/repository/simple_mode_default_groups.go new file mode 100644 index 00000000..56309184 --- /dev/null +++ b/backend/internal/repository/simple_mode_default_groups.go @@ -0,0 +1,82 @@ +package repository + +import ( + "context" + "fmt" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func ensureSimpleModeDefaultGroups(ctx context.Context, client *dbent.Client) error { + if client == nil { + return fmt.Errorf("nil ent client") + } + + requiredByPlatform := map[string]int{ + service.PlatformAnthropic: 1, + service.PlatformOpenAI: 1, + service.PlatformGemini: 1, + service.PlatformAntigravity: 2, + } + + for platform, minCount := range requiredByPlatform { + count, err := client.Group.Query(). + Where(group.PlatformEQ(platform), group.DeletedAtIsNil()). + Count(ctx) + if err != nil { + return fmt.Errorf("count groups for platform %s: %w", platform, err) + } + + if platform == service.PlatformAntigravity { + if count < minCount { + for i := count; i < minCount; i++ { + name := fmt.Sprintf("%s-default-%d", platform, i+1) + if err := createGroupIfNotExists(ctx, client, name, platform); err != nil { + return err + } + } + } + continue + } + + // Non-antigravity platforms: ensure -default exists. 
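+		// For example, a fresh SIMPLE-mode database ends up with groups named
+		// anthropic-default, openai-default and gemini-default, plus
+		// antigravity-default-1 and antigravity-default-2 (assuming the platform
+		// constants are those literal strings).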
+ name := platform + "-default" + if err := createGroupIfNotExists(ctx, client, name, platform); err != nil { + return err + } + } + + return nil +} + +func createGroupIfNotExists(ctx context.Context, client *dbent.Client, name, platform string) error { + exists, err := client.Group.Query(). + Where(group.NameEQ(name), group.DeletedAtIsNil()). + Exist(ctx) + if err != nil { + return fmt.Errorf("check group exists %s: %w", name, err) + } + if exists { + return nil + } + + _, err = client.Group.Create(). + SetName(name). + SetDescription("Auto-created default group"). + SetPlatform(platform). + SetStatus(service.StatusActive). + SetSubscriptionType(service.SubscriptionTypeStandard). + SetRateMultiplier(1.0). + SetIsExclusive(false). + Save(ctx) + if err != nil { + if dbent.IsConstraintError(err) { + // Concurrent server startups may race on creation; treat as success. + return nil + } + return fmt.Errorf("create default group %s: %w", name, err) + } + return nil +} diff --git a/backend/internal/repository/simple_mode_default_groups_integration_test.go b/backend/internal/repository/simple_mode_default_groups_integration_test.go new file mode 100644 index 00000000..3327257b --- /dev/null +++ b/backend/internal/repository/simple_mode_default_groups_integration_test.go @@ -0,0 +1,84 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestEnsureSimpleModeDefaultGroups_CreatesMissingDefaults(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + assertGroupExists := func(name string) { + exists, err := client.Group.Query().Where(group.NameEQ(name), group.DeletedAtIsNil()).Exist(seedCtx) + require.NoError(t, err) + require.True(t, exists, "expected group %s to exist", name) + } + + assertGroupExists(service.PlatformAnthropic + "-default") + assertGroupExists(service.PlatformOpenAI + "-default") + assertGroupExists(service.PlatformGemini + "-default") + assertGroupExists(service.PlatformAntigravity + "-default-1") + assertGroupExists(service.PlatformAntigravity + "-default-2") +} + +func TestEnsureSimpleModeDefaultGroups_IgnoresSoftDeletedGroups(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Create and then soft-delete an anthropic default group. + g, err := client.Group.Create(). + SetName(service.PlatformAnthropic + "-default"). + SetPlatform(service.PlatformAnthropic). + SetStatus(service.StatusActive). + SetSubscriptionType(service.SubscriptionTypeStandard). + SetRateMultiplier(1.0). + SetIsExclusive(false). + Save(seedCtx) + require.NoError(t, err) + + _, err = client.Group.Delete().Where(group.IDEQ(g.ID)).Exec(seedCtx) + require.NoError(t, err) + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + // New active one should exist. 
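+	// The soft-deleted row must not satisfy the existence check, so the seeder
+	// is expected to have created a fresh row with the same name; hence exactly
+	// one non-deleted match is asserted below.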
+ count, err := client.Group.Query().Where(group.NameEQ(service.PlatformAnthropic+"-default"), group.DeletedAtIsNil()).Count(seedCtx) + require.NoError(t, err) + require.Equal(t, 1, count) +} + +func TestEnsureSimpleModeDefaultGroups_AntigravityNeedsTwoGroupsOnlyByCount(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + client := tx.Client() + + seedCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + mustCreateGroup(t, client, &service.Group{Name: "ag-custom-1-" + time.Now().Format(time.RFC3339Nano), Platform: service.PlatformAntigravity}) + mustCreateGroup(t, client, &service.Group{Name: "ag-custom-2-" + time.Now().Format(time.RFC3339Nano), Platform: service.PlatformAntigravity}) + + require.NoError(t, ensureSimpleModeDefaultGroups(seedCtx, client)) + + count, err := client.Group.Query().Where(group.PlatformEQ(service.PlatformAntigravity), group.DeletedAtIsNil()).Count(seedCtx) + require.NoError(t, err) + require.GreaterOrEqual(t, count, 2) +} diff --git a/backend/internal/repository/totp_cache.go b/backend/internal/repository/totp_cache.go new file mode 100644 index 00000000..2f4a8ab2 --- /dev/null +++ b/backend/internal/repository/totp_cache.go @@ -0,0 +1,149 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/redis/go-redis/v9" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +const ( + totpSetupKeyPrefix = "totp:setup:" + totpLoginKeyPrefix = "totp:login:" + totpAttemptsKeyPrefix = "totp:attempts:" + totpAttemptsTTL = 15 * time.Minute +) + +// TotpCache implements service.TotpCache using Redis +type TotpCache struct { + rdb *redis.Client +} + +// NewTotpCache creates a new TOTP cache +func NewTotpCache(rdb *redis.Client) service.TotpCache { + return &TotpCache{rdb: rdb} +} + +// GetSetupSession retrieves a TOTP setup session +func (c *TotpCache) GetSetupSession(ctx context.Context, userID int64) (*service.TotpSetupSession, error) { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + data, err := c.rdb.Get(ctx, key).Bytes() + if err != nil { + if err == redis.Nil { + return nil, nil + } + return nil, fmt.Errorf("get setup session: %w", err) + } + + var session service.TotpSetupSession + if err := json.Unmarshal(data, &session); err != nil { + return nil, fmt.Errorf("unmarshal setup session: %w", err) + } + + return &session, nil +} + +// SetSetupSession stores a TOTP setup session +func (c *TotpCache) SetSetupSession(ctx context.Context, userID int64, session *service.TotpSetupSession, ttl time.Duration) error { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + data, err := json.Marshal(session) + if err != nil { + return fmt.Errorf("marshal setup session: %w", err) + } + + if err := c.rdb.Set(ctx, key, data, ttl).Err(); err != nil { + return fmt.Errorf("set setup session: %w", err) + } + + return nil +} + +// DeleteSetupSession deletes a TOTP setup session +func (c *TotpCache) DeleteSetupSession(ctx context.Context, userID int64) error { + key := fmt.Sprintf("%s%d", totpSetupKeyPrefix, userID) + return c.rdb.Del(ctx, key).Err() +} + +// GetLoginSession retrieves a TOTP login session +func (c *TotpCache) GetLoginSession(ctx context.Context, tempToken string) (*service.TotpLoginSession, error) { + key := totpLoginKeyPrefix + tempToken + data, err := c.rdb.Get(ctx, key).Bytes() + if err != nil { + if err == redis.Nil { + return nil, nil + } + return nil, fmt.Errorf("get login session: %w", err) + } + + var session service.TotpLoginSession + if err := json.Unmarshal(data, 
&session); err != nil { + return nil, fmt.Errorf("unmarshal login session: %w", err) + } + + return &session, nil +} + +// SetLoginSession stores a TOTP login session +func (c *TotpCache) SetLoginSession(ctx context.Context, tempToken string, session *service.TotpLoginSession, ttl time.Duration) error { + key := totpLoginKeyPrefix + tempToken + data, err := json.Marshal(session) + if err != nil { + return fmt.Errorf("marshal login session: %w", err) + } + + if err := c.rdb.Set(ctx, key, data, ttl).Err(); err != nil { + return fmt.Errorf("set login session: %w", err) + } + + return nil +} + +// DeleteLoginSession deletes a TOTP login session +func (c *TotpCache) DeleteLoginSession(ctx context.Context, tempToken string) error { + key := totpLoginKeyPrefix + tempToken + return c.rdb.Del(ctx, key).Err() +} + +// IncrementVerifyAttempts increments the verify attempt counter +func (c *TotpCache) IncrementVerifyAttempts(ctx context.Context, userID int64) (int, error) { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + + // Use pipeline for atomic increment and set TTL + pipe := c.rdb.Pipeline() + incrCmd := pipe.Incr(ctx, key) + pipe.Expire(ctx, key, totpAttemptsTTL) + + if _, err := pipe.Exec(ctx); err != nil { + return 0, fmt.Errorf("increment verify attempts: %w", err) + } + + count, err := incrCmd.Result() + if err != nil { + return 0, fmt.Errorf("get increment result: %w", err) + } + + return int(count), nil +} + +// GetVerifyAttempts gets the current verify attempt count +func (c *TotpCache) GetVerifyAttempts(ctx context.Context, userID int64) (int, error) { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + count, err := c.rdb.Get(ctx, key).Int() + if err != nil { + if err == redis.Nil { + return 0, nil + } + return 0, fmt.Errorf("get verify attempts: %w", err) + } + return count, nil +} + +// ClearVerifyAttempts clears the verify attempt counter +func (c *TotpCache) ClearVerifyAttempts(ctx context.Context, userID int64) error { + key := fmt.Sprintf("%s%d", totpAttemptsKeyPrefix, userID) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go new file mode 100644 index 00000000..9c021357 --- /dev/null +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -0,0 +1,551 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type usageCleanupRepository struct { + client *dbent.Client + sql sqlExecutor +} + +func NewUsageCleanupRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageCleanupRepository { + return newUsageCleanupRepositoryWithSQL(client, sqlDB) +} + +func newUsageCleanupRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageCleanupRepository { + return &usageCleanupRepository{client: client, sql: sqlq} +} + +func (r *usageCleanupRepository) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error { + if task == nil { + return nil + } + if r.client != nil { + return r.createTaskWithEnt(ctx, task) + } + return r.createTaskWithSQL(ctx, task) +} + +func (r *usageCleanupRepository) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + if r.client != nil { + return 
r.listTasksWithEnt(ctx, params) + } + var total int64 + if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM usage_cleanup_tasks", nil, &total); err != nil { + return nil, nil, err + } + if total == 0 { + return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil + } + + query := ` + SELECT id, status, filters, created_by, deleted_rows, error_message, + canceled_by, canceled_at, + started_at, finished_at, created_at, updated_at + FROM usage_cleanup_tasks + ORDER BY created_at DESC, id DESC + LIMIT $1 OFFSET $2 + ` + rows, err := r.sql.QueryContext(ctx, query, params.Limit(), params.Offset()) + if err != nil { + return nil, nil, err + } + defer func() { _ = rows.Close() }() + + tasks := make([]service.UsageCleanupTask, 0) + for rows.Next() { + var task service.UsageCleanupTask + var filtersJSON []byte + var errMsg sql.NullString + var canceledBy sql.NullInt64 + var canceledAt sql.NullTime + var startedAt sql.NullTime + var finishedAt sql.NullTime + if err := rows.Scan( + &task.ID, + &task.Status, + &filtersJSON, + &task.CreatedBy, + &task.DeletedRows, + &errMsg, + &canceledBy, + &canceledAt, + &startedAt, + &finishedAt, + &task.CreatedAt, + &task.UpdatedAt, + ); err != nil { + return nil, nil, err + } + if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil { + return nil, nil, fmt.Errorf("parse cleanup filters: %w", err) + } + if errMsg.Valid { + task.ErrorMsg = &errMsg.String + } + if canceledBy.Valid { + v := canceledBy.Int64 + task.CanceledBy = &v + } + if canceledAt.Valid { + task.CanceledAt = &canceledAt.Time + } + if startedAt.Valid { + task.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + task.FinishedAt = &finishedAt.Time + } + tasks = append(tasks, task) + } + if err := rows.Err(); err != nil { + return nil, nil, err + } + return tasks, paginationResultFromTotal(total, params), nil +} + +func (r *usageCleanupRepository) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*service.UsageCleanupTask, error) { + if staleRunningAfterSeconds <= 0 { + staleRunningAfterSeconds = 1800 + } + query := ` + WITH next AS ( + SELECT id + FROM usage_cleanup_tasks + WHERE status = $1 + OR ( + status = $2 + AND started_at IS NOT NULL + AND started_at < NOW() - ($3 * interval '1 second') + ) + ORDER BY created_at ASC + LIMIT 1 + FOR UPDATE SKIP LOCKED + ) + UPDATE usage_cleanup_tasks AS tasks + SET status = $4, + started_at = NOW(), + finished_at = NULL, + error_message = NULL, + updated_at = NOW() + FROM next + WHERE tasks.id = next.id + RETURNING tasks.id, tasks.status, tasks.filters, tasks.created_by, tasks.deleted_rows, tasks.error_message, + tasks.started_at, tasks.finished_at, tasks.created_at, tasks.updated_at + ` + var task service.UsageCleanupTask + var filtersJSON []byte + var errMsg sql.NullString + var startedAt sql.NullTime + var finishedAt sql.NullTime + if err := scanSingleRow( + ctx, + r.sql, + query, + []any{ + service.UsageCleanupStatusPending, + service.UsageCleanupStatusRunning, + staleRunningAfterSeconds, + service.UsageCleanupStatusRunning, + }, + &task.ID, + &task.Status, + &filtersJSON, + &task.CreatedBy, + &task.DeletedRows, + &errMsg, + &startedAt, + &finishedAt, + &task.CreatedAt, + &task.UpdatedAt, + ); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, err + } + if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil { + return nil, fmt.Errorf("parse cleanup filters: %w", err) + } + if errMsg.Valid { + task.ErrorMsg = &errMsg.String + } + if startedAt.Valid { + 
task.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + task.FinishedAt = &finishedAt.Time + } + return &task, nil +} + +func (r *usageCleanupRepository) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + if r.client != nil { + return r.getTaskStatusWithEnt(ctx, taskID) + } + var status string + if err := scanSingleRow(ctx, r.sql, "SELECT status FROM usage_cleanup_tasks WHERE id = $1", []any{taskID}, &status); err != nil { + return "", err + } + return status, nil +} + +func (r *usageCleanupRepository) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + if r.client != nil { + return r.updateTaskProgressWithEnt(ctx, taskID, deletedRows) + } + query := ` + UPDATE usage_cleanup_tasks + SET deleted_rows = $1, + updated_at = NOW() + WHERE id = $2 + ` + _, err := r.sql.ExecContext(ctx, query, deletedRows, taskID) + return err +} + +func (r *usageCleanupRepository) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + if r.client != nil { + return r.cancelTaskWithEnt(ctx, taskID, canceledBy) + } + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + canceled_by = $3, + canceled_at = NOW(), + finished_at = NOW(), + error_message = NULL, + updated_at = NOW() + WHERE id = $2 + AND status IN ($4, $5) + RETURNING id + ` + var id int64 + err := scanSingleRow(ctx, r.sql, query, []any{ + service.UsageCleanupStatusCanceled, + taskID, + canceledBy, + service.UsageCleanupStatusPending, + service.UsageCleanupStatusRunning, + }, &id) + if errors.Is(err, sql.ErrNoRows) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (r *usageCleanupRepository) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + if r.client != nil { + return r.markTaskSucceededWithEnt(ctx, taskID, deletedRows) + } + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + deleted_rows = $2, + finished_at = NOW(), + updated_at = NOW() + WHERE id = $3 + ` + _, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusSucceeded, deletedRows, taskID) + return err +} + +func (r *usageCleanupRepository) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + if r.client != nil { + return r.markTaskFailedWithEnt(ctx, taskID, deletedRows, errorMsg) + } + query := ` + UPDATE usage_cleanup_tasks + SET status = $1, + deleted_rows = $2, + error_message = $3, + finished_at = NOW(), + updated_at = NOW() + WHERE id = $4 + ` + _, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusFailed, deletedRows, errorMsg, taskID) + return err +} + +func (r *usageCleanupRepository) DeleteUsageLogsBatch(ctx context.Context, filters service.UsageCleanupFilters, limit int) (int64, error) { + if filters.StartTime.IsZero() || filters.EndTime.IsZero() { + return 0, fmt.Errorf("cleanup filters missing time range") + } + whereClause, args := buildUsageCleanupWhere(filters) + if whereClause == "" { + return 0, fmt.Errorf("cleanup filters missing time range") + } + args = append(args, limit) + query := fmt.Sprintf(` + WITH target AS ( + SELECT id + FROM usage_logs + WHERE %s + ORDER BY created_at ASC, id ASC + LIMIT $%d + ) + DELETE FROM usage_logs + WHERE id IN (SELECT id FROM target) + RETURNING id + `, whereClause, len(args)) + + rows, err := r.sql.QueryContext(ctx, query, args...) 
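+	// Each call removes at most `limit` of the oldest matching rows; callers are
+	// expected to loop until a batch comes back empty. Hypothetical worker-side
+	// loop (illustrative only, not part of this patch):
+	//
+	//	for {
+	//		n, err := repo.DeleteUsageLogsBatch(ctx, task.Filters, 1000)
+	//		if err != nil || n == 0 {
+	//			break
+	//		}
+	//		total += n
+	//		_ = repo.UpdateTaskProgress(ctx, task.ID, total)
+	//	}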
+ if err != nil { + return 0, err + } + defer func() { _ = rows.Close() }() + + var deleted int64 + for rows.Next() { + deleted++ + } + if err := rows.Err(); err != nil { + return 0, err + } + return deleted, nil +} + +func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) { + conditions := make([]string, 0, 8) + args := make([]any, 0, 8) + idx := 1 + if !filters.StartTime.IsZero() { + conditions = append(conditions, fmt.Sprintf("created_at >= $%d", idx)) + args = append(args, filters.StartTime) + idx++ + } + if !filters.EndTime.IsZero() { + conditions = append(conditions, fmt.Sprintf("created_at <= $%d", idx)) + args = append(args, filters.EndTime) + idx++ + } + if filters.UserID != nil { + conditions = append(conditions, fmt.Sprintf("user_id = $%d", idx)) + args = append(args, *filters.UserID) + idx++ + } + if filters.APIKeyID != nil { + conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", idx)) + args = append(args, *filters.APIKeyID) + idx++ + } + if filters.AccountID != nil { + conditions = append(conditions, fmt.Sprintf("account_id = $%d", idx)) + args = append(args, *filters.AccountID) + idx++ + } + if filters.GroupID != nil { + conditions = append(conditions, fmt.Sprintf("group_id = $%d", idx)) + args = append(args, *filters.GroupID) + idx++ + } + if filters.Model != nil { + model := strings.TrimSpace(*filters.Model) + if model != "" { + conditions = append(conditions, fmt.Sprintf("model = $%d", idx)) + args = append(args, model) + idx++ + } + } + if filters.Stream != nil { + conditions = append(conditions, fmt.Sprintf("stream = $%d", idx)) + args = append(args, *filters.Stream) + idx++ + } + if filters.BillingType != nil { + conditions = append(conditions, fmt.Sprintf("billing_type = $%d", idx)) + args = append(args, *filters.BillingType) + } + return strings.Join(conditions, " AND "), args +} + +func (r *usageCleanupRepository) createTaskWithEnt(ctx context.Context, task *service.UsageCleanupTask) error { + client := clientFromContext(ctx, r.client) + filtersJSON, err := json.Marshal(task.Filters) + if err != nil { + return fmt.Errorf("marshal cleanup filters: %w", err) + } + created, err := client.UsageCleanupTask. + Create(). + SetStatus(task.Status). + SetFilters(json.RawMessage(filtersJSON)). + SetCreatedBy(task.CreatedBy). + SetDeletedRows(task.DeletedRows). 
+ Save(ctx) + if err != nil { + return err + } + task.ID = created.ID + task.CreatedAt = created.CreatedAt + task.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *usageCleanupRepository) createTaskWithSQL(ctx context.Context, task *service.UsageCleanupTask) error { + filtersJSON, err := json.Marshal(task.Filters) + if err != nil { + return fmt.Errorf("marshal cleanup filters: %w", err) + } + query := ` + INSERT INTO usage_cleanup_tasks ( + status, + filters, + created_by, + deleted_rows + ) VALUES ($1, $2, $3, $4) + RETURNING id, created_at, updated_at + ` + if err := scanSingleRow(ctx, r.sql, query, []any{task.Status, filtersJSON, task.CreatedBy, task.DeletedRows}, &task.ID, &task.CreatedAt, &task.UpdatedAt); err != nil { + return err + } + return nil +} + +func (r *usageCleanupRepository) listTasksWithEnt(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) { + client := clientFromContext(ctx, r.client) + query := client.UsageCleanupTask.Query() + total, err := query.Clone().Count(ctx) + if err != nil { + return nil, nil, err + } + if total == 0 { + return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil + } + rows, err := query. + Order(dbent.Desc(dbusagecleanuptask.FieldCreatedAt), dbent.Desc(dbusagecleanuptask.FieldID)). + Offset(params.Offset()). + Limit(params.Limit()). + All(ctx) + if err != nil { + return nil, nil, err + } + tasks := make([]service.UsageCleanupTask, 0, len(rows)) + for _, row := range rows { + task, err := usageCleanupTaskFromEnt(row) + if err != nil { + return nil, nil, err + } + tasks = append(tasks, task) + } + return tasks, paginationResultFromTotal(int64(total), params), nil +} + +func (r *usageCleanupRepository) getTaskStatusWithEnt(ctx context.Context, taskID int64) (string, error) { + client := clientFromContext(ctx, r.client) + task, err := client.UsageCleanupTask.Query(). + Where(dbusagecleanuptask.IDEQ(taskID)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return "", sql.ErrNoRows + } + return "", err + } + return task.Status, nil +} + +func (r *usageCleanupRepository) updateTaskProgressWithEnt(ctx context.Context, taskID int64, deletedRows int64) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetDeletedRows(deletedRows). + SetUpdatedAt(now). + Save(ctx) + return err +} + +func (r *usageCleanupRepository) cancelTaskWithEnt(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + client := clientFromContext(ctx, r.client) + now := time.Now() + affected, err := client.UsageCleanupTask.Update(). + Where( + dbusagecleanuptask.IDEQ(taskID), + dbusagecleanuptask.StatusIn(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning), + ). + SetStatus(service.UsageCleanupStatusCanceled). + SetCanceledBy(canceledBy). + SetCanceledAt(now). + SetFinishedAt(now). + ClearErrorMessage(). + SetUpdatedAt(now). + Save(ctx) + if err != nil { + return false, err + } + return affected > 0, nil +} + +func (r *usageCleanupRepository) markTaskSucceededWithEnt(ctx context.Context, taskID int64, deletedRows int64) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetStatus(service.UsageCleanupStatusSucceeded). + SetDeletedRows(deletedRows). + SetFinishedAt(now). + SetUpdatedAt(now). 
+ Save(ctx) + return err +} + +func (r *usageCleanupRepository) markTaskFailedWithEnt(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + client := clientFromContext(ctx, r.client) + now := time.Now() + _, err := client.UsageCleanupTask.Update(). + Where(dbusagecleanuptask.IDEQ(taskID)). + SetStatus(service.UsageCleanupStatusFailed). + SetDeletedRows(deletedRows). + SetErrorMessage(errorMsg). + SetFinishedAt(now). + SetUpdatedAt(now). + Save(ctx) + return err +} + +func usageCleanupTaskFromEnt(row *dbent.UsageCleanupTask) (service.UsageCleanupTask, error) { + task := service.UsageCleanupTask{ + ID: row.ID, + Status: row.Status, + CreatedBy: row.CreatedBy, + DeletedRows: row.DeletedRows, + CreatedAt: row.CreatedAt, + UpdatedAt: row.UpdatedAt, + } + if len(row.Filters) > 0 { + if err := json.Unmarshal(row.Filters, &task.Filters); err != nil { + return service.UsageCleanupTask{}, fmt.Errorf("parse cleanup filters: %w", err) + } + } + if row.ErrorMessage != nil { + task.ErrorMsg = row.ErrorMessage + } + if row.CanceledBy != nil { + task.CanceledBy = row.CanceledBy + } + if row.CanceledAt != nil { + task.CanceledAt = row.CanceledAt + } + if row.StartedAt != nil { + task.StartedAt = row.StartedAt + } + if row.FinishedAt != nil { + task.FinishedAt = row.FinishedAt + } + return task, nil +} diff --git a/backend/internal/repository/usage_cleanup_repo_ent_test.go b/backend/internal/repository/usage_cleanup_repo_ent_test.go new file mode 100644 index 00000000..6c20b2b9 --- /dev/null +++ b/backend/internal/repository/usage_cleanup_repo_ent_test.go @@ -0,0 +1,251 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/enttest" + dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "modernc.org/sqlite" +) + +func newUsageCleanupEntRepo(t *testing.T) (*usageCleanupRepository, *dbent.Client) { + t.Helper() + db, err := sql.Open("sqlite", "file:usage_cleanup?mode=memory&cache=shared") + require.NoError(t, err) + t.Cleanup(func() { _ = db.Close() }) + _, err = db.Exec("PRAGMA foreign_keys = ON") + require.NoError(t, err) + + drv := entsql.OpenDB(dialect.SQLite, db) + client := enttest.NewClient(t, enttest.WithOptions(dbent.Driver(drv))) + t.Cleanup(func() { _ = client.Close() }) + + repo := &usageCleanupRepository{client: client, sql: db} + return repo, client +} + +func TestUsageCleanupRepositoryEntCreateAndList(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: start, EndTime: end}, + CreatedBy: 9, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + require.NotZero(t, task.ID) + + task2 := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: start.Add(-24 * time.Hour), EndTime: end.Add(-24 * time.Hour)}, + CreatedBy: 10, + } + require.NoError(t, repo.CreateTask(context.Background(), task2)) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + 
require.NoError(t, err) + require.Len(t, tasks, 2) + require.Equal(t, int64(2), result.Total) + require.Greater(t, tasks[0].ID, tasks[1].ID) + require.Equal(t, start, tasks[1].Filters.StartTime) + require.Equal(t, end, tasks[1].Filters.EndTime) +} + +func TestUsageCleanupRepositoryEntListEmpty(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + require.NoError(t, err) + require.Empty(t, tasks) + require.Equal(t, int64(0), result.Total) +} + +func TestUsageCleanupRepositoryEntGetStatusAndProgress(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 3, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + status, err := repo.GetTaskStatus(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusPending, status) + + _, err = repo.GetTaskStatus(context.Background(), task.ID+99) + require.ErrorIs(t, err, sql.ErrNoRows) + + require.NoError(t, repo.UpdateTaskProgress(context.Background(), task.ID, 42)) + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, int64(42), loaded.DeletedRows) +} + +func TestUsageCleanupRepositoryEntCancelAndFinish(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 5, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + ok, err := repo.CancelTask(context.Background(), task.ID, 7) + require.NoError(t, err) + require.True(t, ok) + + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusCanceled, loaded.Status) + require.NotNil(t, loaded.CanceledBy) + require.NotNil(t, loaded.CanceledAt) + require.NotNil(t, loaded.FinishedAt) + + loaded.Status = service.UsageCleanupStatusSucceeded + _, err = client.UsageCleanupTask.Update().Where(dbusagecleanuptask.IDEQ(task.ID)).SetStatus(loaded.Status).Save(context.Background()) + require.NoError(t, err) + + ok, err = repo.CancelTask(context.Background(), task.ID, 7) + require.NoError(t, err) + require.False(t, ok) +} + +func TestUsageCleanupRepositoryEntCancelError(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 5, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + require.NoError(t, client.Close()) + _, err := repo.CancelTask(context.Background(), task.ID, 7) + require.Error(t, err) +} + +func TestUsageCleanupRepositoryEntMarkResults(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 12, + } + require.NoError(t, repo.CreateTask(context.Background(), task)) + + require.NoError(t, 
repo.MarkTaskSucceeded(context.Background(), task.ID, 6)) + loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusSucceeded, loaded.Status) + require.Equal(t, int64(6), loaded.DeletedRows) + require.NotNil(t, loaded.FinishedAt) + + task2 := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusRunning, + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 12, + } + require.NoError(t, repo.CreateTask(context.Background(), task2)) + + require.NoError(t, repo.MarkTaskFailed(context.Background(), task2.ID, 4, "boom")) + loaded2, err := client.UsageCleanupTask.Get(context.Background(), task2.ID) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusFailed, loaded2.Status) + require.Equal(t, "boom", *loaded2.ErrorMessage) +} + +func TestUsageCleanupRepositoryEntInvalidStatus(t *testing.T) { + repo, _ := newUsageCleanupEntRepo(t) + + task := &service.UsageCleanupTask{ + Status: "invalid", + Filters: service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)}, + CreatedBy: 1, + } + require.Error(t, repo.CreateTask(context.Background(), task)) +} + +func TestUsageCleanupRepositoryEntListInvalidFilters(t *testing.T) { + repo, client := newUsageCleanupEntRepo(t) + + now := time.Now().UTC() + driver, ok := client.Driver().(*entsql.Driver) + require.True(t, ok) + _, err := driver.DB().ExecContext( + context.Background(), + `INSERT INTO usage_cleanup_tasks (status, filters, created_by, deleted_rows, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?)`, + service.UsageCleanupStatusPending, + []byte("invalid-json"), + int64(1), + int64(0), + now, + now, + ) + require.NoError(t, err) + + _, _, err = repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10}) + require.Error(t, err) +} + +func TestUsageCleanupTaskFromEntFull(t *testing.T) { + start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + errMsg := "failed" + canceledBy := int64(2) + canceledAt := start.Add(time.Minute) + startedAt := start.Add(2 * time.Minute) + finishedAt := start.Add(3 * time.Minute) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{ + ID: 10, + Status: service.UsageCleanupStatusFailed, + Filters: filtersJSON, + CreatedBy: 11, + DeletedRows: 7, + ErrorMessage: &errMsg, + CanceledBy: &canceledBy, + CanceledAt: &canceledAt, + StartedAt: &startedAt, + FinishedAt: &finishedAt, + CreatedAt: start, + UpdatedAt: end, + }) + require.NoError(t, err) + require.Equal(t, int64(10), task.ID) + require.Equal(t, service.UsageCleanupStatusFailed, task.Status) + require.NotNil(t, task.ErrorMsg) + require.NotNil(t, task.CanceledBy) + require.NotNil(t, task.CanceledAt) + require.NotNil(t, task.StartedAt) + require.NotNil(t, task.FinishedAt) +} + +func TestUsageCleanupTaskFromEntInvalidFilters(t *testing.T) { + task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{ + Filters: json.RawMessage("invalid-json"), + }) + require.Error(t, err) + require.Empty(t, task) +} diff --git a/backend/internal/repository/usage_cleanup_repo_test.go b/backend/internal/repository/usage_cleanup_repo_test.go new file mode 100644 index 00000000..0ca30ec7 --- /dev/null +++ 
b/backend/internal/repository/usage_cleanup_repo_test.go @@ -0,0 +1,482 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func newSQLMock(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { + t.Helper() + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) + require.NoError(t, err) + t.Cleanup(func() { _ = db.Close() }) + return db, mock +} + +func TestNewUsageCleanupRepository(t *testing.T) { + db, _ := newSQLMock(t) + repo := NewUsageCleanupRepository(nil, db) + require.NotNil(t, repo) +} + +func TestUsageCleanupRepositoryCreateTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: start, EndTime: end}, + CreatedBy: 12, + } + now := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC) + + mock.ExpectQuery("INSERT INTO usage_cleanup_tasks"). + WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows). + WillReturnRows(sqlmock.NewRows([]string{"id", "created_at", "updated_at"}).AddRow(int64(1), now, now)) + + err := repo.CreateTask(context.Background(), task) + require.NoError(t, err) + require.Equal(t, int64(1), task.ID) + require.Equal(t, now, task.CreatedAt) + require.Equal(t, now, task.UpdatedAt) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCreateTaskNil(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + err := repo.CreateTask(context.Background(), nil) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCreateTaskQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + task := &service.UsageCleanupTask{ + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{StartTime: time.Now(), EndTime: time.Now().Add(time.Hour)}, + CreatedBy: 1, + } + + mock.ExpectQuery("INSERT INTO usage_cleanup_tasks"). + WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows). + WillReturnError(sql.ErrConnDone) + + err := repo.CreateTask(context.Background(), task) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasksEmpty(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). 
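+		// newSQLMock opts into QueryMatcherRegexp, so the expectation strings
+		// in these tests are regular expressions; hence the escaped \( \* \).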
+ WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0))) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Empty(t, tasks) + require.Equal(t, int64(0), result.Total) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasks(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + createdAt := time.Date(2024, 1, 2, 12, 0, 0, 0, time.UTC) + updatedAt := createdAt.Add(time.Minute) + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "canceled_by", "canceled_at", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(1), + service.UsageCleanupStatusSucceeded, + filtersJSON, + int64(2), + int64(9), + "error", + nil, + nil, + start, + end, + createdAt, + updatedAt, + ) + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). + WillReturnRows(rows) + + tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Len(t, tasks, 1) + require.Equal(t, int64(1), tasks[0].ID) + require.Equal(t, service.UsageCleanupStatusSucceeded, tasks[0].Status) + require.Equal(t, int64(2), tasks[0].CreatedBy) + require.Equal(t, int64(9), tasks[0].DeletedRows) + require.NotNil(t, tasks[0].ErrorMsg) + require.Equal(t, "error", *tasks[0].ErrorMsg) + require.NotNil(t, tasks[0].StartedAt) + require.NotNil(t, tasks[0].FinishedAt) + require.Equal(t, int64(1), result.Total) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasksQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(2))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). + WillReturnError(sql.ErrConnDone) + + _, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryListTasksInvalidFilters(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "canceled_by", "canceled_at", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(1), + service.UsageCleanupStatusSucceeded, + []byte("not-json"), + int64(2), + int64(9), + nil, + nil, + nil, + nil, + nil, + time.Now().UTC(), + time.Now().UTC(), + ) + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1))) + mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message"). + WithArgs(20, 0). 
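+		// Page 1 with PageSize 20 resolves to the LIMIT 20 / OFFSET 0 pair
+		// asserted by WithArgs above.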
+ WillReturnRows(rows) + + _, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskNone(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnRows(sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + })) + + task, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.NoError(t, err) + require.Nil(t, task) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + filtersJSON, err := json.Marshal(filters) + require.NoError(t, err) + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(4), + service.UsageCleanupStatusRunning, + filtersJSON, + int64(7), + int64(0), + nil, + start, + nil, + start, + start, + ) + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnRows(rows) + + task, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.NoError(t, err) + require.NotNil(t, task) + require.Equal(t, int64(4), task.ID) + require.Equal(t, service.UsageCleanupStatusRunning, task.Status) + require.Equal(t, int64(7), task.CreatedBy) + require.NotNil(t, task.StartedAt) + require.Nil(t, task.ErrorMsg) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). + WillReturnError(sql.ErrConnDone) + + _, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryClaimNextPendingTaskInvalidFilters(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + rows := sqlmock.NewRows([]string{ + "id", "status", "filters", "created_by", "deleted_rows", "error_message", + "started_at", "finished_at", "created_at", "updated_at", + }).AddRow( + int64(4), + service.UsageCleanupStatusRunning, + []byte("invalid"), + int64(7), + int64(0), + nil, + nil, + nil, + time.Now().UTC(), + time.Now().UTC(), + ) + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning). 
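+		// The arguments pin the pending -> running transition plus the
+		// 1800-second threshold, presumably for reclaiming stale running tasks.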
+ WillReturnRows(rows) + + _, err := repo.ClaimNextPendingTask(context.Background(), 1800) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryMarkTaskSucceeded(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusSucceeded, int64(12), int64(9)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.MarkTaskSucceeded(context.Background(), 9, 12) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryMarkTaskFailed(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusFailed, int64(4), "boom", int64(2)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.MarkTaskFailed(context.Background(), 2, 4, "boom") + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryGetTaskStatus(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks"). + WithArgs(int64(9)). + WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow(service.UsageCleanupStatusPending)) + + status, err := repo.GetTaskStatus(context.Background(), 9) + require.NoError(t, err) + require.Equal(t, service.UsageCleanupStatusPending, status) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryGetTaskStatusQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks"). + WithArgs(int64(9)). + WillReturnError(sql.ErrConnDone) + + _, err := repo.GetTaskStatus(context.Background(), 9) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryUpdateTaskProgress(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectExec("UPDATE usage_cleanup_tasks"). + WithArgs(int64(123), int64(8)). + WillReturnResult(sqlmock.NewResult(0, 1)) + + err := repo.UpdateTaskProgress(context.Background(), 8, 123) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCancelTask(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning). + WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(6))) + + ok, err := repo.CancelTask(context.Background(), 6, 9) + require.NoError(t, err) + require.True(t, ok) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryCancelTaskNoRows(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + mock.ExpectQuery("UPDATE usage_cleanup_tasks"). + WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning). 
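+		// Cancellation appears to rely on UPDATE ... RETURNING id: an empty
+		// row set below means no pending/running task matched, so ok is false.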
+ WillReturnRows(sqlmock.NewRows([]string{"id"})) + + ok, err := repo.CancelTask(context.Background(), 6, 9) + require.NoError(t, err) + require.False(t, ok) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatchMissingRange(t *testing.T) { + db, _ := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + _, err := repo.DeleteUsageLogsBatch(context.Background(), service.UsageCleanupFilters{}, 10) + require.Error(t, err) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatch(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(3) + model := " gpt-4 " + filters := service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + Model: &model, + } + + mock.ExpectQuery("DELETE FROM usage_logs"). + WithArgs(start, end, userID, "gpt-4", 2). + WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(1)).AddRow(int64(2))) + + deleted, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 2) + require.NoError(t, err) + require.Equal(t, int64(2), deleted) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageCleanupRepositoryDeleteUsageLogsBatchQueryError(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageCleanupRepository{sql: db} + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + filters := service.UsageCleanupFilters{StartTime: start, EndTime: end} + + mock.ExpectQuery("DELETE FROM usage_logs"). + WithArgs(start, end, 5). + WillReturnError(sql.ErrConnDone) + + _, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 5) + require.Error(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestBuildUsageCleanupWhere(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(1) + apiKeyID := int64(2) + accountID := int64(3) + groupID := int64(4) + model := " gpt-4 " + stream := true + billingType := int8(2) + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + Stream: &stream, + BillingType: &billingType, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2 AND user_id = $3 AND api_key_id = $4 AND account_id = $5 AND group_id = $6 AND model = $7 AND stream = $8 AND billing_type = $9", where) + require.Equal(t, []any{start, end, userID, apiKeyID, accountID, groupID, "gpt-4", stream, billingType}, args) +} + +func TestBuildUsageCleanupWhereModelEmpty(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + model := " " + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + Model: &model, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2", where) + require.Equal(t, []any{start, end}, args) +} diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 4a2aaade..963db7ba 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -1411,7 +1411,7 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe } // GetUsageTrendWithFilters returns usage trend data with optional filters -func (r 
*usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) {
+func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) (results []TrendDataPoint, err error) {
 	dateFormat := "YYYY-MM-DD"
 	if granularity == "hour" {
 		dateFormat = "YYYY-MM-DD HH24:00"
@@ -1456,6 +1460,10 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
 		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
 		args = append(args, *stream)
 	}
+	if billingType != nil {
+		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
+		args = append(args, int16(*billingType))
+	}
 	query += " GROUP BY date ORDER BY date ASC"
 
 	rows, err := r.sql.QueryContext(ctx, query, args...)
@@ -1479,7 +1483,7 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
 }
 
 // GetModelStatsWithFilters returns model statistics with optional filters
-func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) {
+func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) (results []ModelStat, err error) {
 	actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost"
 	// When aggregating by account_id only, actual cost uses the account rate multiplier (total_cost * account_rate_multiplier).
 	if accountID > 0 && userID == 0 && apiKeyID == 0 {
@@ -1520,6 +1524,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
 		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
 		args = append(args, *stream)
 	}
+	if billingType != nil {
+		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
+		args = append(args, int16(*billingType))
+	}
 	query += " GROUP BY model ORDER BY total_tokens DESC"
 
 	rows, err := r.sql.QueryContext(ctx, query, args...)
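Both hunks above follow the same dynamic-WHERE convention used by buildUsageCleanupWhere: each optional filter appends a `$N` placeholder whose index is derived from the current argument count, so conditions compose in any order without renumbering. A minimal, self-contained sketch of the pattern (the helper and column names are illustrative, not taken from the repository); note the deliberate widening of *int8 to int16 before binding, matching the billing_type handling above:

package main

import (
	"fmt"
	"strings"
)

// addFilter appends "column = $N" and its value, deriving N from len(args)+1
// so each placeholder index always matches its argument position.
func addFilter(conds []string, args []any, column string, value any) ([]string, []any) {
	conds = append(conds, fmt.Sprintf("%s = $%d", column, len(args)+1))
	args = append(args, value)
	return conds, args
}

func main() {
	var conds []string
	var args []any
	conds, args = addFilter(conds, args, "user_id", int64(3))

	bt := int8(2)
	billingType := &bt
	if billingType != nil {
		// Widen *int8 to int16 before binding, as the repository code does.
		conds, args = addFilter(conds, args, "billing_type", int16(*billingType))
	}

	query := "SELECT model, SUM(total_cost) FROM usage_logs WHERE " +
		strings.Join(conds, " AND ") + " GROUP BY model"
	fmt.Println(query) // ... WHERE user_id = $1 AND billing_type = $2 GROUP BY model
	fmt.Println(args)  // [3 2]
}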
@@ -1825,7 +1833,7 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID } } - models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil) + models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil) if err != nil { models = []ModelStat{} } diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 7174be18..eb220f22 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ b/backend/internal/repository/usage_log_repo_integration_test.go @@ -944,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { endTime := base.Add(48 * time.Hour) // Test with user filter - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters user filter") s.Require().Len(trend, 2) // Test with apiKey filter - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter") s.Require().Len(trend, 2) // Test with both filters - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters both filters") s.Require().Len(trend, 2) } @@ -971,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { startTime := base.Add(-1 * time.Hour) endTime := base.Add(3 * time.Hour) - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters hourly") s.Require().Len(trend, 2) } @@ -1017,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { endTime := base.Add(2 * time.Hour) // Test with user filter - stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil) + stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters user filter") s.Require().Len(stats, 2) // Test with apiKey filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter") s.Require().Len(stats, 2) // Test with account filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters account filter") s.Require().Len(stats, 2) } diff --git a/backend/internal/repository/user_repo.go b/backend/internal/repository/user_repo.go index 006a5464..654bd16b 100644 --- 
a/backend/internal/repository/user_repo.go
+++ b/backend/internal/repository/user_repo.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"sort"
 	"strings"
+	"time"
 
 	dbent "github.com/Wei-Shaw/sub2api/ent"
 	dbuser "github.com/Wei-Shaw/sub2api/ent/user"
@@ -189,6 +190,7 @@ func (r *userRepository) ListWithFilters(ctx context.Context, params pagination.
 			dbuser.Or(
 				dbuser.EmailContainsFold(filters.Search),
 				dbuser.UsernameContainsFold(filters.Search),
+				dbuser.NotesContainsFold(filters.Search),
 			),
 		)
 	}
@@ -466,3 +468,46 @@ func applyUserEntityToService(dst *service.User, src *dbent.User) {
 	dst.CreatedAt = src.CreatedAt
 	dst.UpdatedAt = src.UpdatedAt
 }
+
+// UpdateTotpSecret updates the user's encrypted TOTP secret.
+func (r *userRepository) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error {
+	client := clientFromContext(ctx, r.client)
+	update := client.User.UpdateOneID(userID)
+	if encryptedSecret == nil {
+		update = update.ClearTotpSecretEncrypted()
+	} else {
+		update = update.SetTotpSecretEncrypted(*encryptedSecret)
+	}
+	_, err := update.Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrUserNotFound, nil)
+	}
+	return nil
+}
+
+// EnableTotp enables TOTP two-factor authentication for the user.
+func (r *userRepository) EnableTotp(ctx context.Context, userID int64) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.User.UpdateOneID(userID).
+		SetTotpEnabled(true).
+		SetTotpEnabledAt(time.Now()).
+		Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrUserNotFound, nil)
+	}
+	return nil
+}
+
+// DisableTotp disables TOTP two-factor authentication for the user.
+func (r *userRepository) DisableTotp(ctx context.Context, userID int64) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.User.UpdateOneID(userID).
+		SetTotpEnabled(false).
+		ClearTotpEnabledAt().
+		ClearTotpSecretEncrypted().
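+		// Disabling clears the flag, the enabled-at timestamp, and the
+		// encrypted secret in one update, so re-enabling requires a fresh setup.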
+		Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrUserNotFound, nil)
+	}
+	return nil
+}
diff --git a/backend/internal/repository/user_subscription_repo.go b/backend/internal/repository/user_subscription_repo.go
index cd3b9db6..5a649846 100644
--- a/backend/internal/repository/user_subscription_repo.go
+++ b/backend/internal/repository/user_subscription_repo.go
@@ -190,7 +190,7 @@ func (r *userSubscriptionRepository) ListByGroupID(ctx context.Context, groupID
 	return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil
 }
 
-func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
+func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
 	client := clientFromContext(ctx, r.client)
 	q := client.UserSubscription.Query()
 	if userID != nil {
@@ -199,7 +199,31 @@ func (r *userSubscriptionRepository) List(ctx context.Context, params pagination
 	if groupID != nil {
 		q = q.Where(usersubscription.GroupIDEQ(*groupID))
 	}
-	if status != "" {
+
+	// Status filtering with real-time expiration check
+	now := time.Now()
+	switch status {
+	case service.SubscriptionStatusActive:
+		// Active: status is active AND not yet expired
+		q = q.Where(
+			usersubscription.StatusEQ(service.SubscriptionStatusActive),
+			usersubscription.ExpiresAtGT(now),
+		)
+	case service.SubscriptionStatusExpired:
+		// Expired: status is expired OR (status is active but already expired)
+		q = q.Where(
+			usersubscription.Or(
+				usersubscription.StatusEQ(service.SubscriptionStatusExpired),
+				usersubscription.And(
+					usersubscription.StatusEQ(service.SubscriptionStatusActive),
+					usersubscription.ExpiresAtLTE(now),
+				),
+			),
+		)
+	case "":
+		// No filter
+	default:
+		// Other status (e.g., revoked)
 		q = q.Where(usersubscription.StatusEQ(status))
 	}
 
@@ -208,11 +232,28 @@ func (r *userSubscriptionRepository) List(ctx context.Context, params pagination
 		return nil, nil, err
 	}
 
+	// Eager-load relations used by the list response
+	q = q.WithUser().WithGroup().WithAssignedByUser()
+
+	// Determine sort field
+	var field string
+	switch sortBy {
+	case "expires_at":
+		field = usersubscription.FieldExpiresAt
+	case "status":
+		field = usersubscription.FieldStatus
+	default:
+		field = usersubscription.FieldCreatedAt
+	}
+
+	// Determine sort order (default: desc)
+	if sortOrder == "asc" && sortBy != "" {
+		q = q.Order(dbent.Asc(field))
+	} else {
+		q = q.Order(dbent.Desc(field))
+	}
+
 	subs, err := q.
-		WithUser().
-		WithGroup().
-		WithAssignedByUser().
-		Order(dbent.Desc(usersubscription.FieldCreatedAt)).
 		Offset(params.Offset()).
 		Limit(params.Limit()).
All(ctx) diff --git a/backend/internal/repository/user_subscription_repo_integration_test.go b/backend/internal/repository/user_subscription_repo_integration_test.go index 2099e5d8..60a5a378 100644 --- a/backend/internal/repository/user_subscription_repo_integration_test.go +++ b/backend/internal/repository/user_subscription_repo_integration_test.go @@ -271,7 +271,7 @@ func (s *UserSubscriptionRepoSuite) TestList_NoFilters() { group := s.mustCreateGroup("g-list") s.mustCreateSubscription(user.ID, group.ID, nil) - subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "") + subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "", "", "") s.Require().NoError(err, "List") s.Require().Len(subs, 1) s.Require().Equal(int64(1), page.Total) @@ -285,7 +285,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByUserID() { s.mustCreateSubscription(user1.ID, group.ID, nil) s.mustCreateSubscription(user2.ID, group.ID, nil) - subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "") + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "", "", "") s.Require().NoError(err) s.Require().Len(subs, 1) s.Require().Equal(user1.ID, subs[0].UserID) @@ -299,7 +299,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByGroupID() { s.mustCreateSubscription(user.ID, g1.ID, nil) s.mustCreateSubscription(user.ID, g2.ID, nil) - subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "") + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "", "", "") s.Require().NoError(err) s.Require().Len(subs, 1) s.Require().Equal(g1.ID, subs[0].GroupID) @@ -320,7 +320,7 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByStatus() { c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) - subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired) + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired, "", "") s.Require().NoError(err) s.Require().Len(subs, 1) s.Require().Equal(service.SubscriptionStatusExpired, subs[0].Status) diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index 77ed37e1..e3394361 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -56,7 +56,10 @@ var ProviderSet = wire.NewSet( NewProxyRepository, NewRedeemCodeRepository, NewPromoCodeRepository, + NewAnnouncementRepository, + NewAnnouncementReadRepository, NewUsageLogRepository, + NewUsageCleanupRepository, NewDashboardAggregationRepository, NewSettingRepository, NewOpsRepository, @@ -81,6 +84,10 @@ var ProviderSet = wire.NewSet( NewSchedulerCache, NewSchedulerOutboxRepository, NewProxyLatencyCache, + NewTotpCache, + + // Encryptors + NewAESEncryptor, // HTTP service ports (DI Strategy A: return interface directly) NewTurnstileVerifier, diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 81b94305..22e6213e 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -11,7 +11,6 @@ import ( "net/http" "net/http/httptest" "sort" - "strings" "testing" "time" @@ -52,7 +51,6 @@ func TestAPIContracts(t *testing.T) { "id": 1, "email": 
"alice@example.com", "username": "alice", - "notes": "hello", "role": "user", "balance": 12.5, "concurrency": 5, @@ -132,6 +130,153 @@ func TestAPIContracts(t *testing.T) { } }`, }, + { + name: "GET /api/v1/groups/available", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + // 普通用户可见的分组列表不应包含内部字段(如 model_routing/account_count)。 + deps.groupRepo.SetActive([]service.Group{ + { + ID: 10, + Name: "Group One", + Description: "desc", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.5, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-*": []int64{101, 102}, + }, + AccountCount: 2, + CreatedAt: deps.now, + UpdatedAt: deps.now, + }, + }) + deps.userSubRepo.SetActiveByUserID(1, nil) + }, + method: http.MethodGet, + path: "/api/v1/groups/available", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": [ + { + "id": 10, + "name": "Group One", + "description": "desc", + "platform": "anthropic", + "rate_multiplier": 1.5, + "is_exclusive": false, + "status": "active", + "subscription_type": "standard", + "daily_limit_usd": null, + "weekly_limit_usd": null, + "monthly_limit_usd": null, + "image_price_1k": null, + "image_price_2k": null, + "image_price_4k": null, + "claude_code_only": false, + "fallback_group_id": null, + "created_at": "2025-01-02T03:04:05Z", + "updated_at": "2025-01-02T03:04:05Z" + } + ] + }`, + }, + { + name: "GET /api/v1/subscriptions", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + // 普通用户订阅接口不应包含 assigned_* / notes 等管理员字段。 + deps.userSubRepo.SetByUserID(1, []service.UserSubscription{ + { + ID: 501, + UserID: 1, + GroupID: 10, + StartsAt: deps.now, + ExpiresAt: time.Date(2099, 1, 2, 3, 4, 5, 0, time.UTC), // 使用未来日期避免 normalizeSubscriptionStatus 标记为过期 + Status: service.SubscriptionStatusActive, + DailyUsageUSD: 1.23, + WeeklyUsageUSD: 2.34, + MonthlyUsageUSD: 3.45, + AssignedBy: ptr(int64(999)), + AssignedAt: deps.now, + Notes: "admin-note", + CreatedAt: deps.now, + UpdatedAt: deps.now, + }, + }) + }, + method: http.MethodGet, + path: "/api/v1/subscriptions", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": [ + { + "id": 501, + "user_id": 1, + "group_id": 10, + "starts_at": "2025-01-02T03:04:05Z", + "expires_at": "2099-01-02T03:04:05Z", + "status": "active", + "daily_window_start": null, + "weekly_window_start": null, + "monthly_window_start": null, + "daily_usage_usd": 1.23, + "weekly_usage_usd": 2.34, + "monthly_usage_usd": 3.45, + "created_at": "2025-01-02T03:04:05Z", + "updated_at": "2025-01-02T03:04:05Z" + } + ] + }`, + }, + { + name: "GET /api/v1/redeem/history", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + // 普通用户兑换历史不应包含 notes 等内部字段。 + deps.redeemRepo.SetByUser(1, []service.RedeemCode{ + { + ID: 900, + Code: "CODE-123", + Type: service.RedeemTypeBalance, + Value: 1.25, + Status: service.StatusUsed, + UsedBy: ptr(int64(1)), + UsedAt: ptr(deps.now), + Notes: "internal-note", + CreatedAt: deps.now, + }, + }) + }, + method: http.MethodGet, + path: "/api/v1/redeem/history", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": [ + { + "id": 900, + "code": "CODE-123", + "type": "balance", + "value": 1.25, + "status": "used", + "used_by": 1, + "used_at": "2025-01-02T03:04:05Z", + "created_at": "2025-01-02T03:04:05Z", + "group_id": null, + "validity_days": 0 + } + ] + }`, + }, 
{ name: "GET /api/v1/usage/stats", setup: func(t *testing.T, deps *contractDeps) { @@ -191,24 +336,25 @@ func TestAPIContracts(t *testing.T) { t.Helper() deps.usageRepo.SetUserLogs(1, []service.UsageLog{ { - ID: 1, - UserID: 1, - APIKeyID: 100, - AccountID: 200, - RequestID: "req_123", - Model: "claude-3", - InputTokens: 10, - OutputTokens: 20, - CacheCreationTokens: 1, - CacheReadTokens: 2, - TotalCost: 0.5, - ActualCost: 0.5, - RateMultiplier: 1, - BillingType: service.BillingTypeBalance, - Stream: true, - DurationMs: ptr(100), - FirstTokenMs: ptr(50), - CreatedAt: deps.now, + ID: 1, + UserID: 1, + APIKeyID: 100, + AccountID: 200, + AccountRateMultiplier: ptr(0.5), + RequestID: "req_123", + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + CacheCreationTokens: 1, + CacheReadTokens: 2, + TotalCost: 0.5, + ActualCost: 0.5, + RateMultiplier: 1, + BillingType: service.BillingTypeBalance, + Stream: true, + DurationMs: ptr(100), + FirstTokenMs: ptr(50), + CreatedAt: deps.now, }, }) }, @@ -239,10 +385,9 @@ func TestAPIContracts(t *testing.T) { "output_cost": 0, "cache_creation_cost": 0, "cache_read_cost": 0, - "total_cost": 0.5, + "total_cost": 0.5, "actual_cost": 0.5, "rate_multiplier": 1, - "account_rate_multiplier": null, "billing_type": 0, "stream": true, "duration_ms": 100, @@ -267,6 +412,7 @@ func TestAPIContracts(t *testing.T) { deps.settingRepo.SetAll(map[string]string{ service.SettingKeyRegistrationEnabled: "true", service.SettingKeyEmailVerifyEnabled: "false", + service.SettingKeyPromoCodeEnabled: "true", service.SettingKeySMTPHost: "smtp.example.com", service.SettingKeySMTPPort: "587", @@ -305,6 +451,10 @@ func TestAPIContracts(t *testing.T) { "data": { "registration_enabled": true, "email_verify_enabled": false, + "promo_code_enabled": true, + "password_reset_enabled": false, + "totp_enabled": false, + "totp_encryption_key_configured": false, "smtp_host": "smtp.example.com", "smtp_port": 587, "smtp_username": "user", @@ -338,45 +488,10 @@ func TestAPIContracts(t *testing.T) { "fallback_model_openai": "gpt-4o", "enable_identity_patch": true, "identity_patch_prompt": "", - "home_content": "" - } - }`, - }, - { - name: "POST /api/v1/admin/accounts/lookup", - setup: func(t *testing.T, deps *contractDeps) { - t.Helper() - deps.accountRepo.lookupAccounts = []service.Account{ - { - ID: 101, - Name: "Alice Account", - Platform: "antigravity", - Credentials: map[string]any{ - "email": "alice@example.com", - }, - }, - } - }, - method: http.MethodPost, - path: "/api/v1/admin/accounts/lookup", - body: `{"platform":"antigravity","emails":["Alice@Example.com","bob@example.com"]}`, - headers: map[string]string{ - "Content-Type": "application/json", - }, - wantStatus: http.StatusOK, - wantJSON: `{ - "code": 0, - "message": "success", - "data": { - "matched": [ - { - "email": "alice@example.com", - "account_id": 101, - "platform": "antigravity", - "name": "Alice Account" - } - ], - "missing": ["bob@example.com"] + "home_content": "", + "hide_ccs_import_button": false, + "purchase_subscription_enabled": false, + "purchase_subscription_url": "" } }`, }, @@ -424,9 +539,11 @@ type contractDeps struct { now time.Time router http.Handler apiKeyRepo *stubApiKeyRepo + groupRepo *stubGroupRepo + userSubRepo *stubUserSubscriptionRepo usageRepo *stubUsageLogRepo settingRepo *stubSettingRepo - accountRepo *stubAccountRepo + redeemRepo *stubRedeemCodeRepo } func newContractDeps(t *testing.T) *contractDeps { @@ -454,11 +571,11 @@ func newContractDeps(t *testing.T) *contractDeps { apiKeyRepo := 
newStubApiKeyRepo(now) apiKeyCache := stubApiKeyCache{} - groupRepo := stubGroupRepo{} - userSubRepo := stubUserSubscriptionRepo{} + groupRepo := &stubGroupRepo{} + userSubRepo := &stubUserSubscriptionRepo{} accountRepo := stubAccountRepo{} proxyRepo := stubProxyRepo{} - redeemRepo := stubRedeemCodeRepo{} + redeemRepo := &stubRedeemCodeRepo{} cfg := &config.Config{ Default: config.DefaultConfig{ @@ -473,15 +590,21 @@ func newContractDeps(t *testing.T) *contractDeps { usageRepo := newStubUsageLogRepo() usageService := service.NewUsageService(usageRepo, userRepo, nil, nil) + subscriptionService := service.NewSubscriptionService(groupRepo, userSubRepo, nil) + subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService) + + redeemService := service.NewRedeemService(redeemRepo, userRepo, subscriptionService, nil, nil, nil, nil) + redeemHandler := handler.NewRedeemHandler(redeemService) + settingRepo := newStubSettingRepo() settingService := service.NewSettingService(settingRepo, cfg) adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil, nil) - authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil) + authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil, nil) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil, nil) - adminAccountHandler := adminhandler.NewAccountHandler(adminService, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + adminAccountHandler := adminhandler.NewAccountHandler(adminService, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) jwtAuth := func(c *gin.Context) { c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ @@ -512,25 +635,35 @@ func newContractDeps(t *testing.T) *contractDeps { v1Keys.Use(jwtAuth) v1Keys.GET("/keys", apiKeyHandler.List) v1Keys.POST("/keys", apiKeyHandler.Create) + v1Keys.GET("/groups/available", apiKeyHandler.GetAvailableGroups) v1Usage := v1.Group("") v1Usage.Use(jwtAuth) v1Usage.GET("/usage", usageHandler.List) v1Usage.GET("/usage/stats", usageHandler.Stats) + v1Subs := v1.Group("") + v1Subs.Use(jwtAuth) + v1Subs.GET("/subscriptions", subscriptionHandler.List) + + v1Redeem := v1.Group("") + v1Redeem.Use(jwtAuth) + v1Redeem.GET("/redeem/history", redeemHandler.GetHistory) + v1Admin := v1.Group("/admin") v1Admin.Use(adminAuth) v1Admin.GET("/settings", adminSettingHandler.GetSettings) v1Admin.POST("/accounts/bulk-update", adminAccountHandler.BulkUpdate) - v1Admin.POST("/accounts/lookup", adminAccountHandler.Lookup) return &contractDeps{ now: now, router: r, apiKeyRepo: apiKeyRepo, + groupRepo: groupRepo, + userSubRepo: userSubRepo, usageRepo: usageRepo, settingRepo: settingRepo, - accountRepo: &accountRepo, + redeemRepo: redeemRepo, } } @@ -626,6 +759,18 @@ func (r *stubUserRepo) RemoveGroupFromAllowedGroups(ctx context.Context, groupID return 0, errors.New("not implemented") } +func (r *stubUserRepo) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) EnableTotp(ctx context.Context, userID int64) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) DisableTotp(ctx context.Context, userID int64) error { + return errors.New("not implemented") +} + type stubApiKeyCache struct{} func (stubApiKeyCache) GetCreateAttemptCount(ctx 
context.Context, userID int64) (int, error) { @@ -660,7 +805,21 @@ func (stubApiKeyCache) DeleteAuthCache(ctx context.Context, key string) error { return nil } -type stubGroupRepo struct{} +func (stubApiKeyCache) PublishAuthCacheInvalidation(ctx context.Context, cacheKey string) error { + return nil +} + +func (stubApiKeyCache) SubscribeAuthCacheInvalidation(ctx context.Context, handler func(cacheKey string)) error { + return nil +} + +type stubGroupRepo struct { + active []service.Group +} + +func (r *stubGroupRepo) SetActive(groups []service.Group) { + r.active = append([]service.Group(nil), groups...) +} func (stubGroupRepo) Create(ctx context.Context, group *service.Group) error { return errors.New("not implemented") @@ -694,12 +853,19 @@ func (stubGroupRepo) ListWithFilters(ctx context.Context, params pagination.Pagi return nil, nil, errors.New("not implemented") } -func (stubGroupRepo) ListActive(ctx context.Context) ([]service.Group, error) { - return nil, errors.New("not implemented") +func (r *stubGroupRepo) ListActive(ctx context.Context) ([]service.Group, error) { + return append([]service.Group(nil), r.active...), nil } -func (stubGroupRepo) ListActiveByPlatform(ctx context.Context, platform string) ([]service.Group, error) { - return nil, errors.New("not implemented") +func (r *stubGroupRepo) ListActiveByPlatform(ctx context.Context, platform string) ([]service.Group, error) { + out := make([]service.Group, 0, len(r.active)) + for i := range r.active { + g := r.active[i] + if g.Platform == platform { + out = append(out, g) + } + } + return out, nil } func (stubGroupRepo) ExistsByName(ctx context.Context, name string) (bool, error) { @@ -715,8 +881,7 @@ func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID i } type stubAccountRepo struct { - bulkUpdateIDs []int64 - lookupAccounts []service.Account + bulkUpdateIDs []int64 } func (s *stubAccountRepo) Create(ctx context.Context, account *service.Account) error { @@ -767,36 +932,6 @@ func (s *stubAccountRepo) ListByPlatform(ctx context.Context, platform string) ( return nil, errors.New("not implemented") } -func (s *stubAccountRepo) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]service.Account, error) { - if len(s.lookupAccounts) == 0 { - return nil, nil - } - emailSet := make(map[string]struct{}, len(emails)) - for _, email := range emails { - normalized := strings.ToLower(strings.TrimSpace(email)) - if normalized == "" { - continue - } - emailSet[normalized] = struct{}{} - } - var matches []service.Account - for i := range s.lookupAccounts { - account := &s.lookupAccounts[i] - if account.Platform != platform { - continue - } - accountEmail := strings.ToLower(strings.TrimSpace(account.GetCredential("email"))) - if accountEmail == "" { - continue - } - if _, ok := emailSet[accountEmail]; !ok { - continue - } - matches = append(matches, *account) - } - return matches, nil -} - func (s *stubAccountRepo) UpdateLastUsed(ctx context.Context, id int64) error { return errors.New("not implemented") } @@ -948,7 +1083,16 @@ func (stubProxyRepo) ListAccountSummariesByProxyID(ctx context.Context, proxyID return nil, errors.New("not implemented") } -type stubRedeemCodeRepo struct{} +type stubRedeemCodeRepo struct { + byUser map[int64][]service.RedeemCode +} + +func (r *stubRedeemCodeRepo) SetByUser(userID int64, codes []service.RedeemCode) { + if r.byUser == nil { + r.byUser = make(map[int64][]service.RedeemCode) + } + r.byUser[userID] = append([]service.RedeemCode(nil), 
codes...) +} func (stubRedeemCodeRepo) Create(ctx context.Context, code *service.RedeemCode) error { return errors.New("not implemented") @@ -986,11 +1130,35 @@ func (stubRedeemCodeRepo) ListWithFilters(ctx context.Context, params pagination return nil, nil, errors.New("not implemented") } -func (stubRedeemCodeRepo) ListByUser(ctx context.Context, userID int64, limit int) ([]service.RedeemCode, error) { - return nil, errors.New("not implemented") +func (r *stubRedeemCodeRepo) ListByUser(ctx context.Context, userID int64, limit int) ([]service.RedeemCode, error) { + if r.byUser == nil { + return nil, nil + } + codes := r.byUser[userID] + if limit > 0 && len(codes) > limit { + codes = codes[:limit] + } + return append([]service.RedeemCode(nil), codes...), nil } -type stubUserSubscriptionRepo struct{} +type stubUserSubscriptionRepo struct { + byUser map[int64][]service.UserSubscription + activeByUser map[int64][]service.UserSubscription +} + +func (r *stubUserSubscriptionRepo) SetByUserID(userID int64, subs []service.UserSubscription) { + if r.byUser == nil { + r.byUser = make(map[int64][]service.UserSubscription) + } + r.byUser[userID] = append([]service.UserSubscription(nil), subs...) +} + +func (r *stubUserSubscriptionRepo) SetActiveByUserID(userID int64, subs []service.UserSubscription) { + if r.activeByUser == nil { + r.activeByUser = make(map[int64][]service.UserSubscription) + } + r.activeByUser[userID] = append([]service.UserSubscription(nil), subs...) +} func (stubUserSubscriptionRepo) Create(ctx context.Context, sub *service.UserSubscription) error { return errors.New("not implemented") @@ -1010,16 +1178,22 @@ func (stubUserSubscriptionRepo) Update(ctx context.Context, sub *service.UserSub func (stubUserSubscriptionRepo) Delete(ctx context.Context, id int64) error { return errors.New("not implemented") } -func (stubUserSubscriptionRepo) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { - return nil, errors.New("not implemented") +func (r *stubUserSubscriptionRepo) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + if r.byUser == nil { + return nil, nil + } + return append([]service.UserSubscription(nil), r.byUser[userID]...), nil } -func (stubUserSubscriptionRepo) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { - return nil, errors.New("not implemented") +func (r *stubUserSubscriptionRepo) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + if r.activeByUser == nil { + return nil, nil + } + return append([]service.UserSubscription(nil), r.activeByUser[userID]...), nil } func (stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { return nil, nil, errors.New("not implemented") } -func (stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) { +func (stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) { return nil, nil, errors.New("not implemented") } func (stubUserSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { @@ -1319,11 +1493,11 @@ func (r 
*stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D
 	return nil, errors.New("not implemented")
 }
 
-func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) {
+func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) {
 	return nil, errors.New("not implemented")
 }
 
-func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) {
+func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) {
 	return nil, errors.New("not implemented")
 }
 
diff --git a/backend/internal/server/middleware/api_key_auth_test.go b/backend/internal/server/middleware/api_key_auth_test.go
index 84398093..920ff93f 100644
--- a/backend/internal/server/middleware/api_key_auth_test.go
+++ b/backend/internal/server/middleware/api_key_auth_test.go
@@ -367,7 +367,7 @@ func (r *stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID in
 	return nil, nil, errors.New("not implemented")
 }
 
-func (r *stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
+func (r *stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) {
 	return nil, nil, errors.New("not implemented")
 }
 
diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go
index cf6fa942..3e0033e7 100644
--- a/backend/internal/server/routes/admin.go
+++ b/backend/internal/server/routes/admin.go
@@ -29,6 +29,9 @@ func RegisterAdminRoutes(
 	// Account management
 	registerAccountRoutes(admin, h)
 
+	// Announcement management
+	registerAnnouncementRoutes(admin, h)
+
 	// OpenAI OAuth
 	registerOpenAIOAuthRoutes(admin, h)
 
@@ -197,7 +200,6 @@ func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
 	accounts := admin.Group("/accounts")
 	{
 		accounts.GET("", h.Admin.Account.List)
-		accounts.POST("/lookup", h.Admin.Account.Lookup)
 		accounts.GET("/:id", h.Admin.Account.GetByID)
 		accounts.POST("", h.Admin.Account.Create)
 		accounts.POST("/sync/crs", h.Admin.Account.SyncFromCRS)
@@ -230,6 +232,18 @@ func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
 	}
 }
 
+func registerAnnouncementRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
+	announcements := admin.Group("/announcements")
+	{
+		announcements.GET("", h.Admin.Announcement.List)
+		announcements.POST("", h.Admin.Announcement.Create)
+		announcements.GET("/:id", h.Admin.Announcement.GetByID)
+		announcements.PUT("/:id", h.Admin.Announcement.Update)
+		announcements.DELETE("/:id", h.Admin.Announcement.Delete)
+		announcements.GET("/:id/read-status", h.Admin.Announcement.ListReadStatus)
+	}
+}
+
 func registerOpenAIOAuthRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
 	openai := admin.Group("/openai")
 	{
@@ -355,6 +369,9 @@ func registerUsageRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
 	usage.GET("/stats", h.Admin.Usage.Stats)
 	usage.GET("/search-users", h.Admin.Usage.SearchUsers)
 	usage.GET("/search-api-keys", h.Admin.Usage.SearchAPIKeys)
+	usage.GET("/cleanup-tasks", h.Admin.Usage.ListCleanupTasks)
+	usage.POST("/cleanup-tasks", h.Admin.Usage.CreateCleanupTask)
+	usage.POST("/cleanup-tasks/:id/cancel", h.Admin.Usage.CancelCleanupTask)
 	}
 }
diff --git a/backend/internal/server/routes/auth.go b/backend/internal/server/routes/auth.go
index aa691eba..33a88e82 100644
--- a/backend/internal/server/routes/auth.go
+++ b/backend/internal/server/routes/auth.go
@@ -26,11 +26,20 @@
 	{
 		auth.POST("/register", h.Auth.Register)
 		auth.POST("/login", h.Auth.Login)
+		auth.POST("/login/2fa", h.Auth.Login2FA)
 		auth.POST("/send-verify-code", h.Auth.SendVerifyCode)
 		// Rate-limit promo code validation: at most 10 per minute (fail-close when Redis is down)
 		auth.POST("/validate-promo-code", rateLimiter.LimitWithOptions("validate-promo", 10, time.Minute, middleware.RateLimitOptions{
 			FailureMode: middleware.RateLimitFailClose,
 		}), h.Auth.ValidatePromoCode)
+		// Rate-limit forgot-password: at most 5 per minute (fail-close when Redis is down)
+		auth.POST("/forgot-password", rateLimiter.LimitWithOptions("forgot-password", 5, time.Minute, middleware.RateLimitOptions{
+			FailureMode: middleware.RateLimitFailClose,
+		}), h.Auth.ForgotPassword)
+		// Rate-limit reset-password: at most 10 per minute (fail-close when Redis is down)
+		auth.POST("/reset-password", rateLimiter.LimitWithOptions("reset-password", 10, time.Minute, middleware.RateLimitOptions{
+			FailureMode: middleware.RateLimitFailClose,
+		}), h.Auth.ResetPassword)
 		auth.GET("/oauth/linuxdo/start", h.Auth.LinuxDoOAuthStart)
 		auth.GET("/oauth/linuxdo/callback", h.Auth.LinuxDoOAuthCallback)
 	}
diff --git a/backend/internal/server/routes/user.go b/backend/internal/server/routes/user.go
index ad2166fe..5581e1e1 100644
--- a/backend/internal/server/routes/user.go
+++ b/backend/internal/server/routes/user.go
@@ -22,6 +22,17 @@ func RegisterUserRoutes(
 		user.GET("/profile", h.User.GetProfile)
 		user.PUT("/password", h.User.ChangePassword)
 		user.PUT("", h.User.UpdateProfile)
+
+		// TOTP two-factor authentication
+		totp := user.Group("/totp")
+		{
+			totp.GET("/status", h.Totp.GetStatus)
+			totp.GET("/verification-method", h.Totp.GetVerificationMethod)
+			totp.POST("/send-code", h.Totp.SendVerifyCode)
+			totp.POST("/setup", h.Totp.InitiateSetup)
+			totp.POST("/enable", h.Totp.Enable)
+			totp.POST("/disable", h.Totp.Disable)
+		}
 	}
 
 	// API key management
@@ -53,6 +64,13 @@ func RegisterUserRoutes(
 		usage.POST("/dashboard/api-keys-usage", h.Usage.DashboardAPIKeysUsage)
 	}
 
+	// Announcements (user-visible)
+	announcements := authenticated.Group("/announcements")
+	{
+		announcements.GET("", h.Announcement.List)
+		announcements.POST("/:id/read", h.Announcement.MarkRead)
+	}
+
 	// Redeem code redemption
 	redeem := authenticated.Group("/redeem")
 	{
diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go
index 36ba0bcc..182e0161 100644
--- a/backend/internal/service/account.go
+++ b/backend/internal/service/account.go
@@ -197,6 +197,35 @@ func (a *Account) GetCredentialAsTime(key string) *time.Time {
 	return nil
 }
 
+// GetCredentialAsInt64 parses an int64 field from the credentials.
+// Used to read internal fields such as _token_version.
+func (a *Account) GetCredentialAsInt64(key string) int64 {
+	if a == nil || a.Credentials == nil {
+		return 0
+	}
+	val, ok := a.Credentials[key]
+	if !ok || val == nil {
+		return 0
+	}
+	switch v := val.(type) {
+	case int64:
+		return v
+	case float64:
+		return int64(v)
+	case int:
+		return int64(v)
+	case json.Number:
+		if i, err := v.Int64(); err == nil {
+			return i
+		}
+	case string:
+		if i, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil {
+			return i
+		}
+	}
+	return 0
+}
+
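The type switch above is needed because the same credential field can surface with different dynamic types depending on how the JSON was decoded. A self-contained sketch of that effect (the "_token_version" payload is only an illustration):

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	func main() {
		raw := []byte(`{"_token_version": 7}`)

		// Default decoding: JSON numbers become float64.
		var m1 map[string]any
		_ = json.Unmarshal(raw, &m1)
		fmt.Printf("%T\n", m1["_token_version"]) // float64

		// Decoder with UseNumber: the same field becomes json.Number.
		var m2 map[string]any
		dec := json.NewDecoder(bytes.NewReader(raw))
		dec.UseNumber()
		_ = dec.Decode(&m2)
		fmt.Printf("%T\n", m2["_token_version"]) // json.Number
	}

String values can additionally appear when credentials were stored by an external system, which is why the string case parses with strconv.ParseInt.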
 func (a *Account) IsTempUnschedulableEnabled() bool {
 	if a.Credentials == nil {
 		return false
@@ -576,6 +605,44 @@ func (a *Account) IsAnthropicOAuthOrSetupToken() bool {
 	return a.Platform == PlatformAnthropic && (a.Type == AccountTypeOAuth || a.Type == AccountTypeSetupToken)
 }
 
+// IsTLSFingerprintEnabled reports whether TLS fingerprint masquerading is enabled.
+// Only applies to Anthropic OAuth/SetupToken accounts.
+// When enabled, the TLS handshake mimics the characteristics of the Claude Code (Node.js) client.
+func (a *Account) IsTLSFingerprintEnabled() bool {
+	// Only Anthropic OAuth/SetupToken accounts are supported
+	if !a.IsAnthropicOAuthOrSetupToken() {
+		return false
+	}
+	if a.Extra == nil {
+		return false
+	}
+	if v, ok := a.Extra["enable_tls_fingerprint"]; ok {
+		if enabled, ok := v.(bool); ok {
+			return enabled
+		}
+	}
+	return false
+}
+
+// IsSessionIDMaskingEnabled reports whether session ID masking is enabled.
+// Only applies to Anthropic OAuth/SetupToken accounts.
+// When enabled, the session ID inside metadata.user_id is pinned for a period
+// (15 minutes) so the upstream sees the requests as coming from a single session.
+func (a *Account) IsSessionIDMaskingEnabled() bool {
+	if !a.IsAnthropicOAuthOrSetupToken() {
+		return false
+	}
+	if a.Extra == nil {
+		return false
+	}
+	if v, ok := a.Extra["session_id_masking_enabled"]; ok {
+		if enabled, ok := v.(bool); ok {
+			return enabled
+		}
+	}
+	return false
+}
+
 // GetWindowCostLimit returns the 5h-window cost threshold in USD.
 // A return value of 0 means the limit is not enabled.
 func (a *Account) GetWindowCostLimit() float64 {
@@ -652,6 +719,23 @@ func (a *Account) CheckWindowCostSchedulability(currentWindowCost float64) Windo
 	return WindowCostNotSchedulable
 }
 
+// GetCurrentWindowStartTime returns the currently effective window start time.
+// Logic:
+// 1. If the window has not expired (SessionWindowEnd is set and after now), use the recorded SessionWindowStart.
+// 2. Otherwise (window expired or unset), use a newly predicted window start (the top of the current hour).
+func (a *Account) GetCurrentWindowStartTime() time.Time {
+	now := time.Now()
+
+	// Window not expired: use the recorded window start time
+	if a.SessionWindowStart != nil && a.SessionWindowEnd != nil && now.Before(*a.SessionWindowEnd) {
+		return *a.SessionWindowStart
+	}
+
+	// Window expired or unset: predict a new window start (from the top of the current hour),
+	// consistent with the prediction logic of UpdateSessionWindow in ratelimit_service.go
+	return time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
+}
+
 // parseExtraFloat64 parses a float64 value from the extra field
 func parseExtraFloat64(value any) float64 {
 	switch v := value.(type) {
diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go
index 72c5c5f8..90365d2f 100644
--- a/backend/internal/service/account_service.go
+++ b/backend/internal/service/account_service.go
@@ -33,7 +33,6 @@ type AccountRepository interface {
 	ListByGroup(ctx context.Context, groupID int64) ([]Account, error)
 	ListActive(ctx context.Context) ([]Account, error)
 	ListByPlatform(ctx context.Context, platform string) ([]Account, error)
-	ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error)
 
 	UpdateLastUsed(ctx context.Context, id int64) error
 	BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error
diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go
index 08b0d5b6..e5eabfc6 100644
--- a/backend/internal/service/account_service_delete_test.go
+++ b/backend/internal/service/account_service_delete_test.go
@@ -87,10 +87,6 @@ func (s *accountRepoStub) ListByPlatform(ctx context.Context, platform string) (
 	panic("unexpected ListByPlatform call")
 }
 
-func (s *accountRepoStub) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) {
-	panic("unexpected ListByPlatformAndCredentialEmails call")
-}
-
 func (s *accountRepoStub) UpdateLastUsed(ctx context.Context, id int64) error {
 	panic("unexpected UpdateLastUsed call")
 }
diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go
index 8419c2b4..46376c69 100644
--- a/backend/internal/service/account_test_service.go
+++ b/backend/internal/service/account_test_service.go
@@ -265,7 +265,7 @@ func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account
 		proxyURL = account.Proxy.URL()
 	}
 
-	resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency)
+	resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled())
 	if err != nil {
 		return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error()))
 	}
@@ -375,7 +375,7 @@ func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account
 		proxyURL = account.Proxy.URL()
 	}
 
-	resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency)
+	resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled())
 	if err != nil {
 		return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error()))
 	}
@@ -446,7 +446,7 @@ func (s *AccountTestService) testGeminiAccountConnection(c *gin.Context, account
 		proxyURL = account.Proxy.URL()
 	}
 
-	resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency)
+	resp, err := s.httpUpstream.DoWithTLS(req, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled())
 	if err != nil {
 		return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error()))
 	}
diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go
index 6f012385..f3b3e20d 100644
--- a/backend/internal/service/account_usage_service.go
+++ b/backend/internal/service/account_usage_service.go
@@ -32,8 +32,8 @@ type UsageLogRepository interface {
 
 	// Admin dashboard stats
 	GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error)
-	GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error)
-	GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error)
+	GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error)
+	GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error)
 	GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error)
 	GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error)
 	GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error)
@@ -157,9 +157,20 @@ type ClaudeUsageResponse struct {
 	} `json:"seven_day_sonnet"`
 }
 
+// ClaudeUsageFetchOptions bundles all options needed to fetch Claude usage data
+type ClaudeUsageFetchOptions struct {
+	AccessToken          string       // OAuth access token
+	ProxyURL             string       // Proxy URL (optional)
+	AccountID            int64        // Account ID (used for TLS fingerprint selection)
+	EnableTLSFingerprint bool         // Whether TLS fingerprint masquerading is enabled
+	Fingerprint          *Fingerprint // Cached fingerprint info (User-Agent, etc.)
+}
+
 // ClaudeUsageFetcher fetches usage data from Anthropic OAuth API
 type ClaudeUsageFetcher interface {
 	FetchUsage(ctx context.Context, accessToken, proxyURL string) (*ClaudeUsageResponse, error)
+	// FetchUsageWithOptions fetches usage data with full options, supporting TLS fingerprints and a custom User-Agent
+	FetchUsageWithOptions(ctx context.Context, opts *ClaudeUsageFetchOptions) (*ClaudeUsageResponse, error)
 }
 
 // AccountUsageService queries per-account usage
@@ -170,6 +181,7 @@ type AccountUsageService struct {
 	accountRepo             AccountRepository
 	usageLogRepo            UsageLogRepository
 	usageFetcher            ClaudeUsageFetcher
 	geminiQuotaService      *GeminiQuotaService
 	antigravityQuotaFetcher *AntigravityQuotaFetcher
 	cache                   *UsageCache
+	identityCache           IdentityCache
 }
 
 // NewAccountUsageService creates an AccountUsageService instance
@@ -180,6 +192,7 @@ func NewAccountUsageService(
 	geminiQuotaService *GeminiQuotaService,
 	antigravityQuotaFetcher *AntigravityQuotaFetcher,
 	cache *UsageCache,
+	identityCache IdentityCache,
 ) *AccountUsageService {
 	return &AccountUsageService{
 		accountRepo: accountRepo,
@@ -188,6 +201,7 @@ func NewAccountUsageService(
 		geminiQuotaService:      geminiQuotaService,
 		antigravityQuotaFetcher: antigravityQuotaFetcher,
 		cache:                   cache,
+		identityCache:           identityCache,
 	}
 }
 
@@ -272,7 +286,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou
 	}
 
 	dayStart := geminiDailyWindowStart(now)
-	stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil)
+	stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil, nil)
 	if err != nil {
 		return nil, fmt.Errorf("get gemini usage stats failed: %w", err)
 	}
@@ -294,7 +308,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou
 	// Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m)
 	minuteStart := now.Truncate(time.Minute)
 	minuteResetAt := minuteStart.Add(time.Minute)
-	minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil)
+	minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil, nil)
 	if err != nil {
 		return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err)
 	}
@@ -369,12 +383,8 @@ func (s *AccountUsageService) addWindowStats(ctx context.Context, account *Accou
 
 	// No cache hit: query the database
 	if windowStats == nil {
-		var startTime time.Time
-		if account.SessionWindowStart != nil {
-			startTime = *account.SessionWindowStart
-		} else {
-			startTime = time.Now().Add(-5 * time.Hour)
-		}
+		// Use the unified window-start computation (handles expired windows)
+		startTime := account.GetCurrentWindowStartTime()
 
 		stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, startTime)
 		if err != nil {
@@ -428,6 +438,8 @@ func (s *AccountUsageService) GetAccountUsageStats(ctx context.Context, accountI
 }
 
 // fetchOAuthUsageRaw fetches the raw response from the Anthropic API (without building UsageInfo)
+// If the account has TLS fingerprinting enabled, the TLS handshake is masqueraded
+// If a cached Fingerprint exists, its User-Agent and related info are reused
 func (s *AccountUsageService) fetchOAuthUsageRaw(ctx context.Context, account *Account) (*ClaudeUsageResponse, error) {
 	accessToken := account.GetCredential("access_token")
 	if accessToken == "" {
 		proxyURL = account.Proxy.URL()
 	}
 
-	return s.usageFetcher.FetchUsage(ctx, accessToken, proxyURL)
+	// Build the full options
+	opts := &ClaudeUsageFetchOptions{
+		AccessToken:          accessToken,
+		ProxyURL:             proxyURL,
+		AccountID:            account.ID,
+		EnableTLSFingerprint: account.IsTLSFingerprintEnabled(),
+	}
+
+	// Try the cached Fingerprint (contains the User-Agent and related info)
+	if s.identityCache != nil {
+		if fp, err := s.identityCache.GetFingerprint(ctx, account.ID); err == nil && fp != nil {
+			opts.Fingerprint = fp
+		}
+	}
+
+	return s.usageFetcher.FetchUsageWithOptions(ctx, opts)
 }
 
 // parseTime tries multiple formats to parse a time value
diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go
index 392c7aa2..d541c73a 100644
--- a/backend/internal/service/admin_service.go
+++ b/backend/internal/service/admin_service.go
@@ -40,7 +40,6 @@ type AdminService interface {
 	CreateAccount(ctx context.Context, input *CreateAccountInput) (*Account, error)
 	UpdateAccount(ctx context.Context, id int64, input *UpdateAccountInput) (*Account, error)
 	DeleteAccount(ctx context.Context, id int64) error
-	LookupAccountsByCredentialEmail(ctx context.Context, platform string, emails []string) ([]Account, error)
 	RefreshAccountCredentials(ctx context.Context, id int64) (*Account, error)
 	ClearAccountError(ctx context.Context, id int64) (*Account, error)
 	SetAccountError(ctx context.Context, id int64, errorMsg string) error
@@ -866,13 +865,6 @@ func (s *adminServiceImpl) GetAccount(ctx context.Context, id int64) (*Account,
 	return s.accountRepo.GetByID(ctx, id)
 }
 
-func (s *adminServiceImpl) LookupAccountsByCredentialEmail(ctx context.Context, platform string, emails []string) ([]Account, error) {
-	if platform == "" || len(emails) == 0 {
-		return []Account{}, nil
-	}
-	return s.accountRepo.ListByPlatformAndCredentialEmails(ctx, platform, emails)
-}
-
 func (s *adminServiceImpl) GetAccountsByIDs(ctx context.Context, ids []int64) ([]*Account, error) {
 	if len(ids) == 0 {
 		return []*Account{}, nil
diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go
index afa433af..6472ccbb 100644
--- a/backend/internal/service/admin_service_delete_test.go
+++ b/backend/internal/service/admin_service_delete_test.go
@@ -93,6 +93,18 @@ func (s *userRepoStub) RemoveGroupFromAllowedGroups(ctx context.Context, groupID
 	panic("unexpected RemoveGroupFromAllowedGroups call")
 }
 
+func (s *userRepoStub) UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error {
+	panic("unexpected UpdateTotpSecret call")
+}
+
+func (s *userRepoStub) EnableTotp(ctx context.Context, userID int64) error {
+	panic("unexpected EnableTotp call")
+}
+
+func (s *userRepoStub) DisableTotp(ctx context.Context, userID int64) error {
+	panic("unexpected DisableTotp call")
+}
+
 type groupRepoStub struct {
 	affectedUserIDs []int64
 	deleteErr       error
diff --git a/backend/internal/service/announcement.go b/backend/internal/service/announcement.go
new file mode 100644
index 00000000..2ba5af5d
--- /dev/null
+++ b/backend/internal/service/announcement.go
@@ -0,0 +1,64 @@
+package service
+
+import (
+	"context"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/domain"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+const (
+	AnnouncementStatusDraft    = domain.AnnouncementStatusDraft
+	AnnouncementStatusActive   = domain.AnnouncementStatusActive
+	AnnouncementStatusArchived = domain.AnnouncementStatusArchived
+)
+
+const (
+	AnnouncementConditionTypeSubscription = domain.AnnouncementConditionTypeSubscription
+	AnnouncementConditionTypeBalance      = domain.AnnouncementConditionTypeBalance
+)
+
+const (
+	AnnouncementOperatorIn  = domain.AnnouncementOperatorIn
+	AnnouncementOperatorGT  = domain.AnnouncementOperatorGT
+	AnnouncementOperatorGTE = domain.AnnouncementOperatorGTE
+	AnnouncementOperatorLT  = domain.AnnouncementOperatorLT
+	AnnouncementOperatorLTE = domain.AnnouncementOperatorLTE
+	AnnouncementOperatorEQ  = domain.AnnouncementOperatorEQ
+)
+
+var (
+	ErrAnnouncementNotFound      = domain.ErrAnnouncementNotFound
+	ErrAnnouncementInvalidTarget = domain.ErrAnnouncementInvalidTarget
+)
+
+type AnnouncementTargeting = domain.AnnouncementTargeting
+
+type AnnouncementConditionGroup = domain.AnnouncementConditionGroup
+
+type AnnouncementCondition = domain.AnnouncementCondition
+
+type Announcement = domain.Announcement
+
+type AnnouncementListFilters struct {
+	Status string
+	Search string
+}
+
+type AnnouncementRepository interface {
+	Create(ctx context.Context, a *Announcement) error
+	GetByID(ctx context.Context, id int64) (*Announcement, error)
+	Update(ctx context.Context, a *Announcement) error
+	Delete(ctx context.Context, id int64) error
+
+	List(ctx context.Context, params pagination.PaginationParams, filters AnnouncementListFilters) ([]Announcement, *pagination.PaginationResult, error)
+	ListActive(ctx context.Context, now time.Time) ([]Announcement, error)
+}
+
+type AnnouncementReadRepository interface {
+	MarkRead(ctx context.Context, announcementID, userID int64, readAt time.Time) error
+	GetReadMapByUser(ctx context.Context, userID int64, announcementIDs []int64) (map[int64]time.Time, error)
+	GetReadMapByUsers(ctx context.Context, announcementID int64, userIDs []int64) (map[int64]time.Time, error)
+	CountByAnnouncementID(ctx context.Context, announcementID int64) (int64, error)
+}
diff --git a/backend/internal/service/announcement_service.go b/backend/internal/service/announcement_service.go
new file mode 100644
index 00000000..c2588e6c
--- /dev/null
+++ b/backend/internal/service/announcement_service.go
@@ -0,0 +1,378 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/domain"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+type AnnouncementService struct {
+	announcementRepo AnnouncementRepository
+	readRepo         AnnouncementReadRepository
+	userRepo         UserRepository
+	userSubRepo      UserSubscriptionRepository
+}
+
+func NewAnnouncementService(
+	announcementRepo AnnouncementRepository,
+	readRepo AnnouncementReadRepository,
+	userRepo UserRepository,
+	userSubRepo UserSubscriptionRepository,
+) *AnnouncementService {
+	return &AnnouncementService{
+		announcementRepo: announcementRepo,
+		readRepo:         readRepo,
+		userRepo:         userRepo,
+		userSubRepo:      userSubRepo,
+	}
+}
+
+type CreateAnnouncementInput struct {
+	Title     string
+	Content   string
+	Status    string
+	Targeting AnnouncementTargeting
+	StartsAt  *time.Time
+	EndsAt    *time.Time
+	ActorID   *int64 // Admin user ID
+}
+
+type UpdateAnnouncementInput struct {
+	Title     *string
+	Content   *string
+	Status    *string
+	Targeting *AnnouncementTargeting
+	StartsAt  **time.Time
+	EndsAt    **time.Time
+	ActorID   *int64 // Admin user ID
+}
+
+type UserAnnouncement struct {
+	Announcement Announcement
+	ReadAt       *time.Time
+}
+
+type AnnouncementUserReadStatus struct {
+	UserID   int64      `json:"user_id"`
+	Email    string     `json:"email"`
+	Username string     `json:"username"`
+	Balance  float64    `json:"balance"`
+	Eligible bool       `json:"eligible"`
+	ReadAt   *time.Time `json:"read_at,omitempty"`
+}
+
+func (s *AnnouncementService) Create(ctx context.Context, input *CreateAnnouncementInput) (*Announcement, error) {
+	if input == nil {
+		return nil, fmt.Errorf("create announcement: nil input")
+	}
+
+	title := strings.TrimSpace(input.Title)
content := strings.TrimSpace(input.Content) + if title == "" || len(title) > 200 { + return nil, fmt.Errorf("create announcement: invalid title") + } + if content == "" { + return nil, fmt.Errorf("create announcement: content is required") + } + + status := strings.TrimSpace(input.Status) + if status == "" { + status = AnnouncementStatusDraft + } + if !isValidAnnouncementStatus(status) { + return nil, fmt.Errorf("create announcement: invalid status") + } + + targeting, err := domain.AnnouncementTargeting(input.Targeting).NormalizeAndValidate() + if err != nil { + return nil, err + } + + if input.StartsAt != nil && input.EndsAt != nil { + if !input.StartsAt.Before(*input.EndsAt) { + return nil, fmt.Errorf("create announcement: starts_at must be before ends_at") + } + } + + a := &Announcement{ + Title: title, + Content: content, + Status: status, + Targeting: targeting, + StartsAt: input.StartsAt, + EndsAt: input.EndsAt, + } + if input.ActorID != nil && *input.ActorID > 0 { + a.CreatedBy = input.ActorID + a.UpdatedBy = input.ActorID + } + + if err := s.announcementRepo.Create(ctx, a); err != nil { + return nil, fmt.Errorf("create announcement: %w", err) + } + return a, nil +} + +func (s *AnnouncementService) Update(ctx context.Context, id int64, input *UpdateAnnouncementInput) (*Announcement, error) { + if input == nil { + return nil, fmt.Errorf("update announcement: nil input") + } + + a, err := s.announcementRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Title != nil { + title := strings.TrimSpace(*input.Title) + if title == "" || len(title) > 200 { + return nil, fmt.Errorf("update announcement: invalid title") + } + a.Title = title + } + if input.Content != nil { + content := strings.TrimSpace(*input.Content) + if content == "" { + return nil, fmt.Errorf("update announcement: content is required") + } + a.Content = content + } + if input.Status != nil { + status := strings.TrimSpace(*input.Status) + if !isValidAnnouncementStatus(status) { + return nil, fmt.Errorf("update announcement: invalid status") + } + a.Status = status + } + + if input.Targeting != nil { + targeting, err := domain.AnnouncementTargeting(*input.Targeting).NormalizeAndValidate() + if err != nil { + return nil, err + } + a.Targeting = targeting + } + + if input.StartsAt != nil { + a.StartsAt = *input.StartsAt + } + if input.EndsAt != nil { + a.EndsAt = *input.EndsAt + } + + if a.StartsAt != nil && a.EndsAt != nil { + if !a.StartsAt.Before(*a.EndsAt) { + return nil, fmt.Errorf("update announcement: starts_at must be before ends_at") + } + } + + if input.ActorID != nil && *input.ActorID > 0 { + a.UpdatedBy = input.ActorID + } + + if err := s.announcementRepo.Update(ctx, a); err != nil { + return nil, fmt.Errorf("update announcement: %w", err) + } + return a, nil +} + +func (s *AnnouncementService) Delete(ctx context.Context, id int64) error { + if err := s.announcementRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete announcement: %w", err) + } + return nil +} + +func (s *AnnouncementService) GetByID(ctx context.Context, id int64) (*Announcement, error) { + return s.announcementRepo.GetByID(ctx, id) +} + +func (s *AnnouncementService) List(ctx context.Context, params pagination.PaginationParams, filters AnnouncementListFilters) ([]Announcement, *pagination.PaginationResult, error) { + return s.announcementRepo.List(ctx, params, filters) +} + +func (s *AnnouncementService) ListForUser(ctx context.Context, userID int64, unreadOnly bool) ([]UserAnnouncement, error) { + user, err := 
s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+
+	activeSubs, err := s.userSubRepo.ListActiveByUserID(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("list active subscriptions: %w", err)
+	}
+	activeGroupIDs := make(map[int64]struct{}, len(activeSubs))
+	for i := range activeSubs {
+		activeGroupIDs[activeSubs[i].GroupID] = struct{}{}
+	}
+
+	now := time.Now()
+	anns, err := s.announcementRepo.ListActive(ctx, now)
+	if err != nil {
+		return nil, fmt.Errorf("list active announcements: %w", err)
+	}
+
+	visible := make([]Announcement, 0, len(anns))
+	ids := make([]int64, 0, len(anns))
+	for i := range anns {
+		a := anns[i]
+		if !a.IsActiveAt(now) {
+			continue
+		}
+		if !a.Targeting.Matches(user.Balance, activeGroupIDs) {
+			continue
+		}
+		visible = append(visible, a)
+		ids = append(ids, a.ID)
+	}
+
+	if len(visible) == 0 {
+		return []UserAnnouncement{}, nil
+	}
+
+	readMap, err := s.readRepo.GetReadMapByUser(ctx, userID, ids)
+	if err != nil {
+		return nil, fmt.Errorf("get read map: %w", err)
+	}
+
+	out := make([]UserAnnouncement, 0, len(visible))
+	for i := range visible {
+		a := visible[i]
+		readAt, ok := readMap[a.ID]
+		if unreadOnly && ok {
+			continue
+		}
+		var ptr *time.Time
+		if ok {
+			t := readAt
+			ptr = &t
+		}
+		out = append(out, UserAnnouncement{
+			Announcement: a,
+			ReadAt:       ptr,
+		})
+	}
+
+	// Unread first; within the same read state, newest first (higher ID as a proxy for creation time)
+	sort.Slice(out, func(i, j int) bool {
+		ai, aj := out[i], out[j]
+		if (ai.ReadAt == nil) != (aj.ReadAt == nil) {
+			return ai.ReadAt == nil
+		}
+		return ai.Announcement.ID > aj.Announcement.ID
+	})
+
+	return out, nil
+}
+
+func (s *AnnouncementService) MarkRead(ctx context.Context, userID, announcementID int64) error {
+	// Security: only allow marking announcements that are currently visible to this user
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("get user: %w", err)
+	}
+
+	a, err := s.announcementRepo.GetByID(ctx, announcementID)
+	if err != nil {
+		return err
+	}
+
+	now := time.Now()
+	if !a.IsActiveAt(now) {
+		return ErrAnnouncementNotFound
+	}
+
+	activeSubs, err := s.userSubRepo.ListActiveByUserID(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("list active subscriptions: %w", err)
+	}
+	activeGroupIDs := make(map[int64]struct{}, len(activeSubs))
+	for i := range activeSubs {
+		activeGroupIDs[activeSubs[i].GroupID] = struct{}{}
+	}
+
+	if !a.Targeting.Matches(user.Balance, activeGroupIDs) {
+		return ErrAnnouncementNotFound
+	}
+
+	if err := s.readRepo.MarkRead(ctx, announcementID, userID, now); err != nil {
+		return fmt.Errorf("mark read: %w", err)
+	}
+	return nil
+}
+
+func (s *AnnouncementService) ListUserReadStatus(
+	ctx context.Context,
+	announcementID int64,
+	params pagination.PaginationParams,
+	search string,
+) ([]AnnouncementUserReadStatus, *pagination.PaginationResult, error) {
+	ann, err := s.announcementRepo.GetByID(ctx, announcementID)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	filters := UserListFilters{
+		Search: strings.TrimSpace(search),
+	}
+
+	users, page, err := s.userRepo.ListWithFilters(ctx, params, filters)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list users: %w", err)
+	}
+
+	userIDs := make([]int64, 0, len(users))
+	for i := range users {
+		userIDs = append(userIDs, users[i].ID)
+	}
+
+	readMap, err := s.readRepo.GetReadMapByUsers(ctx, announcementID, userIDs)
+	if err != nil {
+		return nil, nil, fmt.Errorf("get read map: %w", err)
+	}
+
+	out := make([]AnnouncementUserReadStatus, 0, len(users))
+	for i := range users {
+		u := users[i]
+		subs, err := s.userSubRepo.ListActiveByUserID(ctx, u.ID)
+		if err != nil {
+			return nil, nil, fmt.Errorf("list active subscriptions: %w", err)
+		}
+		activeGroupIDs := make(map[int64]struct{}, len(subs))
+		for j := range subs {
+			activeGroupIDs[subs[j].GroupID] = struct{}{}
+		}
+
+		readAt, ok := readMap[u.ID]
+		var ptr *time.Time
+		if ok {
+			t := readAt
+			ptr = &t
+		}
+
+		out = append(out, AnnouncementUserReadStatus{
+			UserID:   u.ID,
+			Email:    u.Email,
+			Username: u.Username,
+			Balance:  u.Balance,
+			Eligible: domain.AnnouncementTargeting(ann.Targeting).Matches(u.Balance, activeGroupIDs),
+			ReadAt:   ptr,
+		})
+	}
+
+	return out, page, nil
+}
+
+func isValidAnnouncementStatus(status string) bool {
+	switch status {
+	case AnnouncementStatusDraft, AnnouncementStatusActive, AnnouncementStatusArchived:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/backend/internal/service/announcement_targeting_test.go b/backend/internal/service/announcement_targeting_test.go
new file mode 100644
index 00000000..4d904c7d
--- /dev/null
+++ b/backend/internal/service/announcement_targeting_test.go
@@ -0,0 +1,66 @@
+package service
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestAnnouncementTargeting_Matches_EmptyMatchesAll(t *testing.T) {
+	var targeting AnnouncementTargeting
+	require.True(t, targeting.Matches(0, nil))
+	require.True(t, targeting.Matches(123.45, map[int64]struct{}{1: {}}))
+}
+
+func TestAnnouncementTargeting_NormalizeAndValidate_RejectsEmptyGroup(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{AllOf: nil},
+		},
+	}
+	_, err := targeting.NormalizeAndValidate()
+	require.Error(t, err)
+	require.ErrorIs(t, err, ErrAnnouncementInvalidTarget)
+}
+
+func TestAnnouncementTargeting_NormalizeAndValidate_RejectsInvalidCondition(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: "balance", Operator: "between", Value: 10},
+				},
+			},
+		},
+	}
+	_, err := targeting.NormalizeAndValidate()
+	require.Error(t, err)
+	require.ErrorIs(t, err, ErrAnnouncementInvalidTarget)
+}
+
+func TestAnnouncementTargeting_Matches_AndOrSemantics(t *testing.T) {
+	targeting := AnnouncementTargeting{
+		AnyOf: []AnnouncementConditionGroup{
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorGTE, Value: 100},
+					{Type: AnnouncementConditionTypeSubscription, Operator: AnnouncementOperatorIn, GroupIDs: []int64{10}},
+				},
+			},
+			{
+				AllOf: []AnnouncementCondition{
+					{Type: AnnouncementConditionTypeBalance, Operator: AnnouncementOperatorLT, Value: 5},
+				},
+			},
+		},
+	}
+
+	// Hits group 2 (balance < 5)
+	require.True(t, targeting.Matches(4.99, nil))
+	require.False(t, targeting.Matches(5, nil))
+
+	// Hits group 1 (balance >= 100 AND subscription in [10])
+	require.False(t, targeting.Matches(100, map[int64]struct{}{}))
+	require.False(t, targeting.Matches(99.9, map[int64]struct{}{10: {}}))
+	require.True(t, targeting.Matches(100, map[int64]struct{}{10: {}}))
+}
diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go
index 6331acd8..dd52a559 100644
--- a/backend/internal/service/antigravity_gateway_service.go
+++ b/backend/internal/service/antigravity_gateway_service.go
@@ -26,7 +26,7 @@ import (
 
 const (
 	antigravityStickySessionTTL  = time.Hour
-	antigravityDefaultMaxRetries = 5
+	antigravityDefaultMaxRetries = 3
 	antigravityRetryBaseDelay    = 1 * time.Second
 	antigravityRetryMaxDelay     = 16 * time.Second
 )
 
@@ -52,11 +52,11 @@ type antigravityRetryLoopParams struct {
 	action      string
 	body        []byte
 	quotaScope  AntigravityQuotaScope
+	maxRetries  int
 	c              *gin.Context
 	httpUpstream   HTTPUpstream
 	settingService *SettingService
 	handleError    func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope)
-	maxRetries int // Optional; 0 means use the platform-level default
 }
 
 // antigravityRetryLoopResult is the result of the retry loop
@@ -82,9 +82,10 @@ func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopRe
 	if len(availableURLs) == 0 {
 		availableURLs = baseURLs
 	}
+
+	maxRetries := p.maxRetries
 	if maxRetries <= 0 {
-		maxRetries = antigravityMaxRetries()
+		maxRetries = antigravityDefaultMaxRetries
 	}
 
 	var resp *http.Response
@@ -161,7 +162,7 @@ urlFallbackLoop:
 				continue urlFallbackLoop
 			}
 
-			// Account/model quota rate-limited: exponential backoff up to the max retry count
+			// Account/model quota rate-limited: retry 3 times (exponential backoff)
 			if attempt < maxRetries {
 				upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody))
 				upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
@@ -1044,7 +1045,7 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context,
 	return &ForwardResult{
 		RequestID:    requestID,
 		Usage:        *usage,
-		Model:        billingModel,
+		Model:        billingModel, // Billing model (may be overridden by the mapped model)
 		Stream:       claudeReq.Stream,
 		Duration:     time.Since(startTime),
 		FirstTokenMs: firstTokenMs,
@@ -1729,7 +1730,6 @@ func antigravityFallbackCooldownSeconds() (time.Duration, bool) {
 	}
 	return time.Duration(seconds) * time.Second, true
 }
-
 func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) {
 	// 429: parse using the Gemini format (reset time comes from the body)
 	if statusCode == 429 {
@@ -1742,9 +1742,6 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre
 		fallbackMinutes = s.settingService.cfg.Gateway.AntigravityFallbackCooldownMinutes
 	}
 	defaultDur := time.Duration(fallbackMinutes) * time.Minute
-	if override, ok := antigravityFallbackCooldownSeconds(); ok {
-		defaultDur = override
-	}
 	ra := time.Now().Add(defaultDur)
 	if useScopeLimit {
 		log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur)
@@ -2185,6 +2182,58 @@ func getOrCreateGeminiParts(response map[string]any) (result map[string]any, exi
 	return result, existingParts, setParts
 }
 
+// mergeCollectedPartsToResponse merges all collected parts into the Gemini response.
+// It handles parts of every type: text, thinking, functionCall, inlineData, and so on.
+// Original order is preserved; only consecutive plain text parts are coalesced.
+func mergeCollectedPartsToResponse(response map[string]any, collectedParts []map[string]any) map[string]any {
+	if len(collectedParts) == 0 {
+		return response
+	}
+
+	result, _, setParts := getOrCreateGeminiParts(response)
+
+	// Merge strategy:
+	// 1. Preserve the original order.
+	// 2. Coalesce consecutive plain text parts into one.
+	// 3. Keep thinking, functionCall, inlineData, etc. as-is.
+	var mergedParts []any
+	var textBuffer strings.Builder
+
+	flushTextBuffer := func() {
+		if textBuffer.Len() > 0 {
+			mergedParts = append(mergedParts, map[string]any{
+				"text": textBuffer.String(),
+			})
+			textBuffer.Reset()
+		}
+	}
+
+	for _, part := range collectedParts {
+		// Check whether this is a plain text part
+		if text, ok := part["text"].(string); ok {
+			// Check for the thought marker
+			if thought, _ := part["thought"].(bool); thought {
+				// Thinking part: flush the text buffer first, then keep the part as-is
+				flushTextBuffer()
+				mergedParts = append(mergedParts, part)
+			} else {
+				// Plain text: accumulate into the buffer
+				_, _ = textBuffer.WriteString(text)
+			}
+		} else {
+			// Non-text part (functionCall, inlineData, etc.): flush the text buffer first, then keep it as-is
+			flushTextBuffer()
+			mergedParts = append(mergedParts, part)
+		}
+	}
+
+	// Flush any remaining text
+	flushTextBuffer()
+
+	setParts(mergedParts)
+	return result
+}
+
 // mergeImagePartsToResponse merges the collected image parts into the Gemini response
 func mergeImagePartsToResponse(response map[string]any, imageParts []map[string]any) map[string]any {
 	if len(imageParts) == 0 {
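A condensed, standalone sketch of the coalescing rule above (the part payloads are hypothetical; the real function additionally writes the result back through setParts):

	package main

	import "fmt"

	func main() {
		// Stream order: two plain text chunks, a functionCall, another text chunk.
		parts := []map[string]any{
			{"text": "Hel"},
			{"text": "lo"},
			{"functionCall": map[string]any{"name": "get_weather"}},
			{"text": "!"},
		}

		var merged []any
		var buf string
		flush := func() {
			if buf != "" {
				merged = append(merged, map[string]any{"text": buf})
				buf = ""
			}
		}
		for _, p := range parts {
			text, isText := p["text"].(string)
			thought, _ := p["thought"].(bool)
			if isText && !thought {
				buf += text // consecutive plain text coalesces
				continue
			}
			flush() // thinking/functionCall/inlineData parts pass through unchanged
			merged = append(merged, p)
		}
		flush()

		fmt.Println(len(merged)) // 3: "Hello", the functionCall, "!"
	}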
@@ -2372,8 +2421,7 @@ func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Cont
 	var firstTokenMs *int
 	var last map[string]any
 	var lastWithParts map[string]any
-	var collectedImageParts []map[string]any // Collects all parts containing images
-	var collectedTextParts []string          // Collects all text fragments
+	var collectedParts []map[string]any // Collects all parts (including text, thinking, functionCall, inlineData, etc.)
 
 	type scanEvent struct {
 		line string
@@ -2468,18 +2516,12 @@ func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Cont
 
 			last = parsed
 
-			// Keep the last response that has parts
+			// Keep the last response that has parts, and collect all parts
 			if parts := extractGeminiParts(parsed); len(parts) > 0 {
 				lastWithParts = parsed
-				// Collect parts containing images or text
-				for _, part := range parts {
-					if _, ok := part["inlineData"].(map[string]any); ok {
-						collectedImageParts = append(collectedImageParts, part)
-					}
-					if text, ok := part["text"].(string); ok && text != "" {
-						collectedTextParts = append(collectedTextParts, text)
-					}
-				}
+
+				// Collect all parts (text, thinking, functionCall, inlineData, etc.)
+				collectedParts = append(collectedParts, parts...)
 			}
 
 		case <-intervalCh:
@@ -2502,32 +2544,13 @@ returnResponse:
 		return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Empty response from upstream")
 	}
 
-	// If image parts were collected, merge them into the final response
-	if len(collectedImageParts) > 0 {
-		finalResponse = mergeImagePartsToResponse(finalResponse, collectedImageParts)
-	}
-
-	// If any text was collected, merge it into the final response
-	if len(collectedTextParts) > 0 {
-		finalResponse = mergeTextPartsToResponse(finalResponse, collectedTextParts)
-	}
-
-	geminiPayload := finalResponse
-	if _, ok := finalResponse["response"]; !ok {
-		wrapped := map[string]any{
-			"response": finalResponse,
-		}
-		if respID, ok := finalResponse["responseId"]; ok {
-			wrapped["responseId"] = respID
-		}
-		if modelVersion, ok := finalResponse["modelVersion"]; ok {
-			wrapped["modelVersion"] = modelVersion
-		}
-		geminiPayload = wrapped
+	// Merge all collected parts into the final response
+	if len(collectedParts) > 0 {
+		finalResponse = mergeCollectedPartsToResponse(finalResponse, collectedParts)
 	}
 
 	// Serialize to JSON (Gemini format)
-	geminiBody, err := json.Marshal(geminiPayload)
+	geminiBody, err := json.Marshal(finalResponse)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal gemini response: %w", err)
 	}
diff --git a/backend/internal/service/antigravity_model_mapping_test.go b/backend/internal/service/antigravity_model_mapping_test.go
index 39000e4f..179a3520 100644
--- a/backend/internal/service/antigravity_model_mapping_test.go
+++ b/backend/internal/service/antigravity_model_mapping_test.go
@@ -30,7 +30,7 @@ func TestIsAntigravityModelSupported(t *testing.T) {
 		{"mappable - claude-3-haiku-20240307", "claude-3-haiku-20240307", true},
 
 		// Gemini prefix passthrough
-		{"Gemini prefix - gemini-1.5-pro", "gemini-1.5-pro", true},
+		{"Gemini prefix - gemini-2.5-pro", "gemini-2.5-pro", true},
 		{"Gemini prefix - gemini-unknown-model", "gemini-unknown-model", true},
 		{"Gemini prefix - gemini-future-version", "gemini-future-version", true},
 
@@ -142,10 +142,10 @@ func TestAntigravityGatewayService_GetMappedModel(t *testing.T) {
 			expected:       "gemini-2.5-flash",
 		},
 		{
-			name:           "Gemini passthrough - gemini-1.5-pro",
-			requestedModel: "gemini-1.5-pro",
+			name:           "Gemini passthrough - gemini-2.5-pro",
+			requestedModel: "gemini-2.5-pro",
 			accountMapping: nil,
-			expected:       "gemini-1.5-pro",
+			expected:       "gemini-2.5-pro",
 		},
 		{
 			name: "Gemini passthrough - gemini-future-model",
diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go
index 52293cd5..fa8379ed 100644
--- a/backend/internal/service/antigravity_oauth_service.go
+++ b/backend/internal/service/antigravity_oauth_service.go
@@ -142,12 +142,13 @@ func (s *AntigravityOAuthService) ExchangeCode(ctx context.Context, input *Antig
 		result.Email = userInfo.Email
 	}
 
-	// Fetch the project_id (some account types may not have one)
-	loadResp, _, err := client.LoadCodeAssist(ctx, tokenResp.AccessToken)
-	if err != nil {
-		fmt.Printf("[AntigravityOAuth] warning: failed to fetch project_id: %v\n", err)
-	} else if loadResp != nil && loadResp.CloudAICompanionProject != "" {
-		result.ProjectID = loadResp.CloudAICompanionProject
+	// Fetch the project_id (some account types may not have one), retrying on failure
+	projectID, loadErr := s.loadProjectIDWithRetry(ctx, tokenResp.AccessToken, proxyURL, 3)
+	if loadErr != nil {
+		fmt.Printf("[AntigravityOAuth] warning: failed to fetch project_id (after retries): %v\n", loadErr)
+		result.ProjectIDMissing = true
+	} else {
+		result.ProjectID = projectID
 	}
 
 	return result, nil
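The loadProjectIDWithRetry helper added in the next hunk sleeps before each retry with a shift-based exponential backoff. A standalone check of the schedule it produces for the 3 retries used here (the 8s cap only matters for larger retry counts):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Wait applied before retry attempts 1..3:
		// 1<<(attempt-1) seconds, capped at 8s -> 1s, 2s, 4s.
		for attempt := 1; attempt <= 3; attempt++ {
			backoff := time.Duration(1<<(attempt-1)) * time.Second
			if backoff > 8*time.Second {
				backoff = 8 * time.Second
			}
			fmt.Println(attempt, backoff) // 1 1s, 2 2s, 3 4s
		}
	}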
@@ -237,21 +238,60 @@ func (s *AntigravityOAuthService) RefreshAccountToken(ctx context.Context, accou
 		tokenInfo.Email = existingEmail
 	}
 
-	// Call LoadCodeAssist on every refresh to fetch the project_id
-	client := antigravity.NewClient(proxyURL)
-	loadResp, _, err := client.LoadCodeAssist(ctx, tokenInfo.AccessToken)
-	if err != nil || loadResp == nil || loadResp.CloudAICompanionProject == "" {
-		// LoadCodeAssist failed or returned empty: keep the existing project_id and mark it missing
-		existingProjectID := strings.TrimSpace(account.GetCredential("project_id"))
+	// Call LoadCodeAssist on every refresh to fetch the project_id, retrying on failure
+	existingProjectID := strings.TrimSpace(account.GetCredential("project_id"))
+	projectID, loadErr := s.loadProjectIDWithRetry(ctx, tokenInfo.AccessToken, proxyURL, 3)
+
+	if loadErr != nil {
+		// LoadCodeAssist failed: keep the existing project_id
 		tokenInfo.ProjectID = existingProjectID
-		tokenInfo.ProjectIDMissing = true
+		// Only mark the project_id as truly missing if it was never fetched and this attempt failed too.
+		// If a project_id existed before, this is just a transient failure and should not be flagged as an error.
+		if existingProjectID == "" {
+			tokenInfo.ProjectIDMissing = true
+		}
 	} else {
-		tokenInfo.ProjectID = loadResp.CloudAICompanionProject
+		tokenInfo.ProjectID = projectID
 	}
 
 	return tokenInfo, nil
 }
 
+// loadProjectIDWithRetry fetches the project_id with a retry mechanism.
+// It returns the project_id and an error, retrying the given number of times on failure.
+func (s *AntigravityOAuthService) loadProjectIDWithRetry(ctx context.Context, accessToken, proxyURL string, maxRetries int) (string, error) {
+	var lastErr error
+
+	for attempt := 0; attempt <= maxRetries; attempt++ {
+		if attempt > 0 {
+			// Exponential backoff: 1s, 2s, 4s
+			backoff := time.Duration(1<<(attempt-1)) * time.Second
+			if backoff > 8*time.Second {
+				backoff = 8 * time.Second
+			}
+			time.Sleep(backoff)
+		}
+
+		client := antigravity.NewClient(proxyURL)
+		loadResp, _, err := client.LoadCodeAssist(ctx, accessToken)
+
+		if err == nil && loadResp != nil && loadResp.CloudAICompanionProject != "" {
+			return loadResp.CloudAICompanionProject, nil
+		}
+
+		// Record the error
+		if err != nil {
+			lastErr = err
+		} else if loadResp == nil {
+			lastErr = fmt.Errorf("LoadCodeAssist returned an empty response")
+		} else {
+			lastErr = fmt.Errorf("LoadCodeAssist returned an empty project_id")
+		}
+	}
+
+	return "", fmt.Errorf("failed to fetch project_id (after %d retries): %w", maxRetries, lastErr)
+}
+
 // BuildAccountCredentials builds the account credentials
 func (s *AntigravityOAuthService) BuildAccountCredentials(tokenInfo *AntigravityTokenInfo) map[string]any {
 	creds := map[string]any{
diff --git a/backend/internal/service/antigravity_rate_limit_test.go b/backend/internal/service/antigravity_rate_limit_test.go
index bf02364b..9535948c 100644
--- a/backend/internal/service/antigravity_rate_limit_test.go
+++ b/backend/internal/service/antigravity_rate_limit_test.go
@@ -38,6 +38,10 @@ func (s *stubAntigravityUpstream) Do(req *http.Request, proxyURL string, account
 	}, nil
 }
 
+func (s *stubAntigravityUpstream) DoWithTLS(req *http.Request, proxyURL string, accountID int64, accountConcurrency int, enableTLSFingerprint bool) (*http.Response, error) {
+	return s.Do(req, proxyURL, accountID, accountConcurrency)
+}
+
 type scopeLimitCall struct {
 	accountID int64
 	scope     AntigravityQuotaScope
@@ -90,14 +94,14 @@ func TestAntigravityRetryLoop_URLFallback_UsesLatestSuccess(t *testing.T) {
 	var handleErrorCalled bool
 	result, err := antigravityRetryLoop(antigravityRetryLoopParams{
-		prefix:      "[test]",
-		ctx:         context.Background(),
-		account:     account,
-		proxyURL:    "",
-		accessToken: "token",
-		action:      "generateContent",
-		body:        []byte(`{"input":"test"}`),
-		quotaScope:  AntigravityQuotaScopeClaude,
+		prefix:       "[test]",
+		ctx:          context.Background(),
+		account:      account,
+		proxyURL:     "",
+		accessToken:  "token",
+		action:       "generateContent",
+		body:         []byte(`{"input":"test"}`),
+		quotaScope:   AntigravityQuotaScopeClaude,
 		httpUpstream: upstream,
 		handleError: func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) {
 			handleErrorCalled = true
diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go
index c5dc55db..94eca94d 100644
--- a/backend/internal/service/antigravity_token_provider.go
+++ b/backend/internal/service/antigravity_token_provider.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"log"
+	"log/slog"
 	"strconv"
 	"strings"
 	"time"
@@ -101,21 +102,32 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account *
 		return "", errors.New("access_token not found in credentials")
 	}
 
-	// 3. Store it in the cache
+	// 3. Store it in the cache (verify the token version before writing, to avoid a race
+	// between the async refresh job and request threads)
 	if p.tokenCache != nil {
-		ttl := 30 * time.Minute
-		if expiresAt != nil {
-			until := time.Until(*expiresAt)
-			switch {
-			case until > antigravityTokenCacheSkew:
-				ttl = until - antigravityTokenCacheSkew
-			case until > 0:
-				ttl = until
-			default:
-				ttl = time.Minute
+		latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo)
+		if isStale && latestAccount != nil {
+			// The version is stale: use the latest token from the DB
+			slog.Debug("antigravity_token_version_stale_use_latest", "account_id", account.ID)
+			accessToken = latestAccount.GetCredential("access_token")
+			if strings.TrimSpace(accessToken) == "" {
+				return "", errors.New("access_token not found after version check")
 			}
+			// Do not write to the cache; let the next request re-resolve the token
+		} else {
+			ttl := 30 * time.Minute
+			if expiresAt != nil {
+				until := time.Until(*expiresAt)
+				switch {
+				case until > antigravityTokenCacheSkew:
+					ttl = until - antigravityTokenCacheSkew
+				case until > 0:
+					ttl = until
+				default:
+					ttl = time.Minute
+				}
+			}
+			_ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl)
 		}
-		_ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl)
 	}
 
 	return accessToken, nil
diff --git a/backend/internal/service/antigravity_token_refresher.go b/backend/internal/service/antigravity_token_refresher.go
index a07c86e6..e33f88d0 100644
--- a/backend/internal/service/antigravity_token_refresher.go
+++ b/backend/internal/service/antigravity_token_refresher.go
@@ -3,6 +3,8 @@ package service
 
 import (
 	"context"
 	"fmt"
+	"log"
+	"strings"
 	"time"
 )
@@ -55,15 +57,32 @@ func (r *AntigravityTokenRefresher) Refresh(ctx context.Context, account *Accoun
 	}
 
 	newCredentials := r.antigravityOAuthService.BuildAccountCredentials(tokenInfo)
+	// Merge the old credentials, keeping fields that are absent from the new set
 	for k, v := range account.Credentials {
 		if _, exists := newCredentials[k]; !exists {
 			newCredentials[k] = v
 		}
 	}
 
-	// If fetching the project_id failed, return the credentials but also return an error so the account gets flagged
+	// Special-case project_id: if the new value is empty but the old one is not, keep the old value.
+	// This guarantees the project_id is not lost even when LoadCodeAssist fails.
+	if newProjectID, _ := newCredentials["project_id"].(string); newProjectID == "" {
+		if oldProjectID := strings.TrimSpace(account.GetCredential("project_id")); oldProjectID != "" {
+			newCredentials["project_id"] = oldProjectID
+		}
+	}
+
+	// If fetching the project_id failed, only log a warning instead of returning an error.
+	// A LoadCodeAssist failure may be a transient network problem, so allow retries rather than
+	// immediately marking the account with a non-retryable error.
+	// The token refresh itself succeeded (access_token and refresh_token were updated).
 	if tokenInfo.ProjectIDMissing {
-		return newCredentials, fmt.Errorf("missing_project_id: account is missing a project id and may not be usable with Antigravity")
+		if tokenInfo.ProjectID != "" {
+			// An old project_id exists and only this fetch failed: keep the old value
+			log.Printf("[AntigravityTokenRefresher] Account %d: LoadCodeAssist failed transiently; keeping the old project_id", account.ID)
+		} else {
+			// No project_id was ever fetched and this attempt failed too; do not return an error so the next refresh can retry
+			log.Printf("[AntigravityTokenRefresher] Account %d: LoadCodeAssist failed and project_id is missing, but the token was updated; will retry on the next refresh", account.ID)
+		}
 	}
 
 	return newCredentials, nil
 }
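For the cross-instance cache invalidation wired up in the next file, a minimal go-redis sketch of how the two Pub/Sub methods could be realized; the channel name and client setup are assumptions for illustration, not the repository's actual implementation:

	package main

	import (
		"context"
		"fmt"

		"github.com/redis/go-redis/v9"
	)

	// Hypothetical channel name; the real cache layer chooses its own.
	const invalidationChannel = "apikey:auth-cache:invalidate"

	func publishInvalidation(ctx context.Context, rdb *redis.Client, cacheKey string) error {
		return rdb.Publish(ctx, invalidationChannel, cacheKey).Err()
	}

	func subscribeInvalidation(ctx context.Context, rdb *redis.Client, handler func(cacheKey string)) {
		sub := rdb.Subscribe(ctx, invalidationChannel)
		// Demo wiring: a real service keeps this goroutine running for its lifetime.
		go func() {
			for msg := range sub.Channel() {
				handler(msg.Payload) // e.g. evict the key from the local L1 cache
			}
		}()
	}

	func main() {
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
		subscribeInvalidation(context.Background(), rdb, func(k string) { fmt.Println("evict", k) })
		_ = publishInvalidation(context.Background(), rdb, "sha256-of-key")
	}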
diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go
index b9f0ef36..5fb4edee 100644
--- a/backend/internal/service/api_key_auth_cache_impl.go
+++ b/backend/internal/service/api_key_auth_cache_impl.go
@@ -94,6 +94,20 @@ func (s *APIKeyService) initAuthCache(cfg *config.Config) {
 	s.authCacheL1 = cache
 }
 
+// StartAuthCacheInvalidationSubscriber starts the Pub/Sub subscriber for L1 cache invalidation.
+// This should be called after the service is fully initialized.
+func (s *APIKeyService) StartAuthCacheInvalidationSubscriber(ctx context.Context) {
+	if s.cache == nil || s.authCacheL1 == nil {
+		return
+	}
+	if err := s.cache.SubscribeAuthCacheInvalidation(ctx, func(cacheKey string) {
+		s.authCacheL1.Del(cacheKey)
+	}); err != nil {
+		// Log but don't fail - L1 cache will still work, just without cross-instance invalidation
+		println("[Service] Warning: failed to start auth cache invalidation subscriber:", err.Error())
+	}
+}
+
 func (s *APIKeyService) authCacheKey(key string) string {
 	sum := sha256.Sum256([]byte(key))
 	return hex.EncodeToString(sum[:])
@@ -149,6 +163,8 @@ func (s *APIKeyService) deleteAuthCache(ctx context.Context, cacheKey string) {
 		return
 	}
 	_ = s.cache.DeleteAuthCache(ctx, cacheKey)
+	// Publish invalidation message to other instances
+	_ = s.cache.PublishAuthCacheInvalidation(ctx, cacheKey)
 }
 
 func (s *APIKeyService) loadAuthCacheEntry(ctx context.Context, key, cacheKey string) (*APIKeyAuthCacheEntry, error) {
diff --git a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go
index ecc570c7..ef1ff990 100644
--- a/backend/internal/service/api_key_service.go
+++ b/backend/internal/service/api_key_service.go
@@ -65,6 +65,10 @@ type APIKeyCache interface {
 	GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error)
 	SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error
 	DeleteAuthCache(ctx context.Context, key string) error
+
+	// Pub/Sub for L1 cache invalidation across instances
+	PublishAuthCacheInvalidation(ctx context.Context, cacheKey string) error
+	SubscribeAuthCacheInvalidation(ctx context.Context, handler func(cacheKey string)) error
 }
 
 // APIKeyAuthCacheInvalidator provides the ability to invalidate the auth cache
diff --git a/backend/internal/service/api_key_service_cache_test.go b/backend/internal/service/api_key_service_cache_test.go
index 5f2d69c4..c5e9cd47 100644
--- a/backend/internal/service/api_key_service_cache_test.go
+++ b/backend/internal/service/api_key_service_cache_test.go
@@ -142,6 +142,14 @@ func (s *authCacheStub) DeleteAuthCache(ctx context.Context, key string) error {
 	return nil
 }
 
+func (s *authCacheStub) PublishAuthCacheInvalidation(ctx context.Context, cacheKey string) error {
+	return nil
+}
+
+func (s *authCacheStub) SubscribeAuthCacheInvalidation(ctx context.Context, handler func(cacheKey string)) error {
+	return nil
+}
+
 func TestAPIKeyService_GetByKey_UsesL2Cache(t *testing.T) {
 	cache := &authCacheStub{}
 	repo := &authRepoStub{
diff --git a/backend/internal/service/api_key_service_delete_test.go b/backend/internal/service/api_key_service_delete_test.go
index 32ae884e..092b7fce 100644
--- a/backend/internal/service/api_key_service_delete_test.go
+++ b/backend/internal/service/api_key_service_delete_test.go
@@ -168,6 +168,14 @@ func (s *apiKeyCacheStub) DeleteAuthCache(ctx context.Context, key string) error
 	return nil
 }
 
+func (s *apiKeyCacheStub) PublishAuthCacheInvalidation(ctx context.Context, cacheKey string) error {
+	return nil
+}
+
+func (s *apiKeyCacheStub) SubscribeAuthCacheInvalidation(ctx context.Context, handler func(cacheKey string)) error {
+	return nil
+}
+
 // TestApiKeyService_Delete_OwnerMismatch verifies that a delete attempt by a non-owner returns a permission error.
 // Expected behavior:
 // - GetKeyAndOwnerID returns owner ID 1
diff --git a/backend/internal/service/auth_service.go b/backend/internal/service/auth_service.go
index 386b43fc..f51fae24 100644
--- a/backend/internal/service/auth_service.go
+++ b/backend/internal/service/auth_service.go
@@ -153,8 +153,8 @@ func (s *AuthService) RegisterWithVerification(ctx context.Context, email, passw
 		return "", nil, ErrServiceUnavailable
 	}
 
-	// Apply the promo code (if one was provided)
-	if promoCode != "" && s.promoService != nil {
+	// Apply the promo code (if one was provided and the feature is enabled)
+	if promoCode != "" && s.promoService != nil && s.settingService != nil && s.settingService.IsPromoCodeEnabled(ctx) {
 		if err := s.promoService.ApplyPromoCode(ctx, user.ID, promoCode); err != nil {
 			// A failed promo code application does not block registration; just log it
 			log.Printf("[Auth] Failed to apply promo code for user %d: %v", user.ID, err)
@@ -580,3 +580,149 @@ func (s *AuthService) RefreshToken(ctx context.Context, oldTokenString string) (
 	// Generate a new token
 	return s.GenerateToken(user)
 }
+
+// IsPasswordResetEnabled reports whether the password reset feature is enabled.
+// Requirements: email verification must be enabled and SMTP must be configured correctly.
+func (s *AuthService) IsPasswordResetEnabled(ctx context.Context) bool {
+	if s.settingService == nil {
+		return false
+	}
+	// Must have email verification enabled and SMTP configured
+	if !s.settingService.IsEmailVerifyEnabled(ctx) {
+		return false
+	}
+	return s.settingService.IsPasswordResetEnabled(ctx)
+}
+
+// preparePasswordReset validates the password reset request and returns necessary data
+// Returns (siteName, resetURL, shouldProceed)
+// shouldProceed is false when we should silently return success (to prevent enumeration)
+func (s *AuthService) preparePasswordReset(ctx context.Context, email, frontendBaseURL string) (string, string, bool) {
+	// Check if user exists (but don't reveal this to the caller)
+	user, err := s.userRepo.GetByEmail(ctx, email)
+	if err != nil {
+		if errors.Is(err, ErrUserNotFound) {
+			// Security: Log but don't reveal that user doesn't exist
+			log.Printf("[Auth] Password reset requested for non-existent email: %s", email)
+			return "", "", false
+		}
+		log.Printf("[Auth] Database error checking email for password reset: %v", err)
+		return "", "", false
+	}
+
+	// Check if user is active
+	if !user.IsActive() {
+		log.Printf("[Auth] Password reset requested for inactive user: %s", email)
+		return "", "", false
+	}
+
+	// Get site name
+	siteName := "Sub2API"
+	if s.settingService != nil {
+		siteName = s.settingService.GetSiteName(ctx)
+	}
+
+	// Build reset URL base
+	resetURL := fmt.Sprintf("%s/reset-password", strings.TrimSuffix(frontendBaseURL, "/"))
+
+	return siteName, resetURL, true
+}
+
+// RequestPasswordReset requests a password reset (synchronous send).
+// Security: Returns the same response regardless of whether the email exists (prevent user enumeration)
+func (s *AuthService) RequestPasswordReset(ctx context.Context, email, frontendBaseURL string) error {
+	if !s.IsPasswordResetEnabled(ctx) {
+		return infraerrors.Forbidden("PASSWORD_RESET_DISABLED", "password reset is not enabled")
+	}
+	if s.emailService == nil {
+		return ErrServiceUnavailable
+	}
+
+	siteName, resetURL, shouldProceed := s.preparePasswordReset(ctx, email, frontendBaseURL)
+	if !shouldProceed {
+		return nil // Silent success to prevent enumeration
+	}
+
+	if err := s.emailService.SendPasswordResetEmail(ctx, email, siteName, resetURL); err != nil {
+		log.Printf("[Auth] Failed to send password reset email to %s: %v", email, err)
+		return nil // Silent success to prevent enumeration
Silent success to prevent enumeration + } + + log.Printf("[Auth] Password reset email sent to: %s", email) + return nil +} + +// RequestPasswordResetAsync 异步请求密码重置(队列发送) +// Security: Returns the same response regardless of whether the email exists (prevent user enumeration) +func (s *AuthService) RequestPasswordResetAsync(ctx context.Context, email, frontendBaseURL string) error { + if !s.IsPasswordResetEnabled(ctx) { + return infraerrors.Forbidden("PASSWORD_RESET_DISABLED", "password reset is not enabled") + } + if s.emailQueueService == nil { + return ErrServiceUnavailable + } + + siteName, resetURL, shouldProceed := s.preparePasswordReset(ctx, email, frontendBaseURL) + if !shouldProceed { + return nil // Silent success to prevent enumeration + } + + if err := s.emailQueueService.EnqueuePasswordReset(email, siteName, resetURL); err != nil { + log.Printf("[Auth] Failed to enqueue password reset email for %s: %v", email, err) + return nil // Silent success to prevent enumeration + } + + log.Printf("[Auth] Password reset email enqueued for: %s", email) + return nil +} + +// ResetPassword 重置密码 +// Security: Increments TokenVersion to invalidate all existing JWT tokens +func (s *AuthService) ResetPassword(ctx context.Context, email, token, newPassword string) error { + // Check if password reset is enabled + if !s.IsPasswordResetEnabled(ctx) { + return infraerrors.Forbidden("PASSWORD_RESET_DISABLED", "password reset is not enabled") + } + + if s.emailService == nil { + return ErrServiceUnavailable + } + + // Verify and consume the reset token (one-time use) + if err := s.emailService.ConsumePasswordResetToken(ctx, email, token); err != nil { + return err + } + + // Get user + user, err := s.userRepo.GetByEmail(ctx, email) + if err != nil { + if errors.Is(err, ErrUserNotFound) { + return ErrInvalidResetToken // Token was valid but user was deleted + } + log.Printf("[Auth] Database error getting user for password reset: %v", err) + return ErrServiceUnavailable + } + + // Check if user is active + if !user.IsActive() { + return ErrUserNotActive + } + + // Hash new password + hashedPassword, err := s.HashPassword(newPassword) + if err != nil { + return fmt.Errorf("hash password: %w", err) + } + + // Update password and increment TokenVersion + user.PasswordHash = hashedPassword + user.TokenVersion++ // Invalidate all existing tokens + + if err := s.userRepo.Update(ctx, user); err != nil { + log.Printf("[Auth] Database error updating password for user %d: %v", user.ID, err) + return ErrServiceUnavailable + } + + log.Printf("[Auth] Password reset successful for user: %s", email) + return nil +} diff --git a/backend/internal/service/auth_service_register_test.go b/backend/internal/service/auth_service_register_test.go index bc8f6f68..e31ca561 100644 --- a/backend/internal/service/auth_service_register_test.go +++ b/backend/internal/service/auth_service_register_test.go @@ -71,6 +71,26 @@ func (s *emailCacheStub) DeleteVerificationCode(ctx context.Context, email strin return nil } +func (s *emailCacheStub) GetPasswordResetToken(ctx context.Context, email string) (*PasswordResetTokenData, error) { + return nil, nil +} + +func (s *emailCacheStub) SetPasswordResetToken(ctx context.Context, email string, data *PasswordResetTokenData, ttl time.Duration) error { + return nil +} + +func (s *emailCacheStub) DeletePasswordResetToken(ctx context.Context, email string) error { + return nil +} + +func (s *emailCacheStub) IsPasswordResetEmailInCooldown(ctx context.Context, email string) bool { + return false +} 
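
A minimal, self-contained Go sketch of the verify-then-delete pattern that ResetPassword relies on (the real implementation is ConsumePasswordResetToken in email_service.go further below). tokenStore, get, delete, and consume are illustrative stand-ins, not identifiers from this patch:

package main

import (
	"context"
	"crypto/subtle"
	"errors"
	"fmt"
)

var errInvalidToken = errors.New("invalid or expired password reset token")

// tokenStore is an in-memory stand-in for the Redis-backed EmailCache.
type tokenStore struct{ tokens map[string]string } // email -> token

func (s *tokenStore) get(email string) (string, bool) { t, ok := s.tokens[email]; return t, ok }
func (s *tokenStore) delete(email string)             { delete(s.tokens, email) }

// consume compares in constant time, then deletes the token so it can
// only ever be used once; a second call with the same token must fail.
func (s *tokenStore) consume(ctx context.Context, email, token string) error {
	stored, ok := s.get(email)
	if !ok {
		return errInvalidToken
	}
	if subtle.ConstantTimeCompare([]byte(stored), []byte(token)) != 1 {
		return errInvalidToken
	}
	s.delete(email)
	return nil
}

func main() {
	store := &tokenStore{tokens: map[string]string{"a@b.c": "tok123"}}
	fmt.Println(store.consume(context.Background(), "a@b.c", "tok123")) // <nil>: first use succeeds
	fmt.Println(store.consume(context.Background(), "a@b.c", "tok123")) // error: token already consumed
}
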
+ +func (s *emailCacheStub) SetPasswordResetEmailCooldown(ctx context.Context, email string, ttl time.Duration) error { + return nil +} + func newAuthService(repo *userRepoStub, settings map[string]string, emailCache EmailCache) *AuthService { cfg := &config.Config{ JWT: config.JWTConfig{ diff --git a/backend/internal/service/claude_token_provider.go b/backend/internal/service/claude_token_provider.go index c7c6e42d..f6cab204 100644 --- a/backend/internal/service/claude_token_provider.go +++ b/backend/internal/service/claude_token_provider.go @@ -181,26 +181,37 @@ func (p *ClaudeTokenProvider) GetAccessToken(ctx context.Context, account *Accou return "", errors.New("access_token not found in credentials") } - // 3. 存入缓存 + // 3. 存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) if p.tokenCache != nil { - ttl := 30 * time.Minute - if refreshFailed { - // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 - ttl = time.Minute - slog.Debug("claude_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") - } else if expiresAt != nil { - until := time.Until(*expiresAt) - switch { - case until > claudeTokenCacheSkew: - ttl = until - claudeTokenCacheSkew - case until > 0: - ttl = until - default: - ttl = time.Minute + latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) + if isStale && latestAccount != nil { + // 版本过时,使用 DB 中的最新 token + slog.Debug("claude_token_version_stale_use_latest", "account_id", account.ID) + accessToken = latestAccount.GetCredential("access_token") + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found after version check") + } + // 不写入缓存,让下次请求重新处理 + } else { + ttl := 30 * time.Minute + if refreshFailed { + // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 + ttl = time.Minute + slog.Debug("claude_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") + } else if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > claudeTokenCacheSkew: + ttl = until - claudeTokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { + slog.Warn("claude_token_cache_set_failed", "account_id", account.ID, "error", err) } - } - if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { - slog.Warn("claude_token_cache_set_failed", "account_id", account.ID, "error", err) } } diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go index da5c0e7d..10c68868 100644 --- a/backend/internal/service/dashboard_aggregation_service.go +++ b/backend/internal/service/dashboard_aggregation_service.go @@ -20,12 +20,16 @@ var ( // ErrDashboardBackfillDisabled 当配置禁用回填时返回。 ErrDashboardBackfillDisabled = errors.New("仪表盘聚合回填已禁用") // ErrDashboardBackfillTooLarge 当回填跨度超过限制时返回。 - ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") + ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") + errDashboardAggregationRunning = errors.New("聚合作业正在运行") ) // DashboardAggregationRepository 定义仪表盘预聚合仓储接口。 type DashboardAggregationRepository interface { AggregateRange(ctx context.Context, start, end time.Time) error + // RecomputeRange 重新计算指定时间范围内的聚合数据(包含活跃用户等派生表)。 + // 设计目的:当 usage_logs 被批量删除/回滚后,确保聚合表可恢复一致性。 + RecomputeRange(ctx context.Context, start, end time.Time) error GetAggregationWatermark(ctx context.Context) (time.Time, error) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error 
CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error @@ -112,6 +116,41 @@ func (s *DashboardAggregationService) TriggerBackfill(start, end time.Time) erro return nil } +// TriggerRecomputeRange 触发指定范围的重新计算(异步)。 +// 与 TriggerBackfill 不同: +// - 不依赖 backfill_enabled(这是内部一致性修复) +// - 不更新 watermark(避免影响正常增量聚合游标) +func (s *DashboardAggregationService) TriggerRecomputeRange(start, end time.Time) error { + if s == nil || s.repo == nil { + return errors.New("聚合服务未初始化") + } + if !s.cfg.Enabled { + return errors.New("聚合服务已禁用") + } + if !end.After(start) { + return errors.New("重新计算时间范围无效") + } + + go func() { + const maxRetries = 3 + for i := 0; i < maxRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout) + err := s.recomputeRange(ctx, start, end) + cancel() + if err == nil { + return + } + if !errors.Is(err, errDashboardAggregationRunning) { + log.Printf("[DashboardAggregation] 重新计算失败: %v", err) + return + } + time.Sleep(5 * time.Second) + } + log.Printf("[DashboardAggregation] 重新计算放弃: 聚合作业持续占用") + }() + return nil +} + func (s *DashboardAggregationService) recomputeRecentDays() { days := s.cfg.RecomputeDays if days <= 0 { @@ -128,6 +167,24 @@ func (s *DashboardAggregationService) recomputeRecentDays() { } } +func (s *DashboardAggregationService) recomputeRange(ctx context.Context, start, end time.Time) error { + if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + return errDashboardAggregationRunning + } + defer atomic.StoreInt32(&s.running, 0) + + jobStart := time.Now().UTC() + if err := s.repo.RecomputeRange(ctx, start, end); err != nil { + return err + } + log.Printf("[DashboardAggregation] 重新计算完成 (start=%s end=%s duration=%s)", + start.UTC().Format(time.RFC3339), + end.UTC().Format(time.RFC3339), + time.Since(jobStart).String(), + ) + return nil +} + func (s *DashboardAggregationService) runScheduledAggregation() { if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { return @@ -179,7 +236,7 @@ func (s *DashboardAggregationService) runScheduledAggregation() { func (s *DashboardAggregationService) backfillRange(ctx context.Context, start, end time.Time) error { if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { - return errors.New("聚合作业正在运行") + return errDashboardAggregationRunning } defer atomic.StoreInt32(&s.running, 0) diff --git a/backend/internal/service/dashboard_aggregation_service_test.go b/backend/internal/service/dashboard_aggregation_service_test.go index 2fc22105..a7058985 100644 --- a/backend/internal/service/dashboard_aggregation_service_test.go +++ b/backend/internal/service/dashboard_aggregation_service_test.go @@ -27,6 +27,10 @@ func (s *dashboardAggregationRepoTestStub) AggregateRange(ctx context.Context, s return s.aggregateErr } +func (s *dashboardAggregationRepoTestStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return s.AggregateRange(ctx, start, end) +} + func (s *dashboardAggregationRepoTestStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { return s.watermark, nil } diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go index a9811919..cd11923e 100644 --- a/backend/internal/service/dashboard_service.go +++ b/backend/internal/service/dashboard_service.go @@ -124,16 +124,16 @@ func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.D return stats, nil } -func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, 
granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { - trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) +func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { + trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) if err != nil { return nil, fmt.Errorf("get usage trend with filters: %w", err) } return trend, nil } -func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream) +func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) if err != nil { return nil, fmt.Errorf("get model stats with filters: %w", err) } diff --git a/backend/internal/service/dashboard_service_test.go b/backend/internal/service/dashboard_service_test.go index db3c78c3..59b83e66 100644 --- a/backend/internal/service/dashboard_service_test.go +++ b/backend/internal/service/dashboard_service_test.go @@ -101,6 +101,10 @@ func (s *dashboardAggregationRepoStub) AggregateRange(ctx context.Context, start return nil } +func (s *dashboardAggregationRepoStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return nil +} + func (s *dashboardAggregationRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { if s.err != nil { return time.Time{}, s.err diff --git a/backend/internal/service/domain_constants.go b/backend/internal/service/domain_constants.go index 49bb86a7..eee8bddd 100644 --- a/backend/internal/service/domain_constants.go +++ b/backend/internal/service/domain_constants.go @@ -1,66 +1,68 @@ package service +import "github.com/Wei-Shaw/sub2api/internal/domain" + // Status constants const ( - StatusActive = "active" - StatusDisabled = "disabled" - StatusError = "error" - StatusUnused = "unused" - StatusUsed = "used" - StatusExpired = "expired" + StatusActive = domain.StatusActive + StatusDisabled = domain.StatusDisabled + StatusError = domain.StatusError + StatusUnused = domain.StatusUnused + StatusUsed = domain.StatusUsed + StatusExpired = domain.StatusExpired ) // Role constants const ( - RoleAdmin = "admin" - RoleUser = "user" + RoleAdmin = domain.RoleAdmin + RoleUser = domain.RoleUser ) // Platform constants const ( - PlatformAnthropic = "anthropic" - PlatformOpenAI = "openai" - PlatformGemini = "gemini" - PlatformAntigravity = "antigravity" + PlatformAnthropic = domain.PlatformAnthropic + PlatformOpenAI = domain.PlatformOpenAI + PlatformGemini = domain.PlatformGemini + PlatformAntigravity = domain.PlatformAntigravity ) // Account type constants const ( - AccountTypeOAuth = "oauth" // OAuth类型账号(full scope: profile + inference) - AccountTypeSetupToken = "setup-token" // 
Setup Token类型账号(inference only scope) - AccountTypeAPIKey = "apikey" // API Key类型账号 + AccountTypeOAuth = domain.AccountTypeOAuth // OAuth类型账号(full scope: profile + inference) + AccountTypeSetupToken = domain.AccountTypeSetupToken // Setup Token类型账号(inference only scope) + AccountTypeAPIKey = domain.AccountTypeAPIKey // API Key类型账号 ) // Redeem type constants const ( - RedeemTypeBalance = "balance" - RedeemTypeConcurrency = "concurrency" - RedeemTypeSubscription = "subscription" + RedeemTypeBalance = domain.RedeemTypeBalance + RedeemTypeConcurrency = domain.RedeemTypeConcurrency + RedeemTypeSubscription = domain.RedeemTypeSubscription ) // PromoCode status constants const ( - PromoCodeStatusActive = "active" - PromoCodeStatusDisabled = "disabled" + PromoCodeStatusActive = domain.PromoCodeStatusActive + PromoCodeStatusDisabled = domain.PromoCodeStatusDisabled ) // Admin adjustment type constants const ( - AdjustmentTypeAdminBalance = "admin_balance" // 管理员调整余额 - AdjustmentTypeAdminConcurrency = "admin_concurrency" // 管理员调整并发数 + AdjustmentTypeAdminBalance = domain.AdjustmentTypeAdminBalance // 管理员调整余额 + AdjustmentTypeAdminConcurrency = domain.AdjustmentTypeAdminConcurrency // 管理员调整并发数 ) // Group subscription type constants const ( - SubscriptionTypeStandard = "standard" // 标准计费模式(按余额扣费) - SubscriptionTypeSubscription = "subscription" // 订阅模式(按限额控制) + SubscriptionTypeStandard = domain.SubscriptionTypeStandard // 标准计费模式(按余额扣费) + SubscriptionTypeSubscription = domain.SubscriptionTypeSubscription // 订阅模式(按限额控制) ) // Subscription status constants const ( - SubscriptionStatusActive = "active" - SubscriptionStatusExpired = "expired" - SubscriptionStatusSuspended = "suspended" + SubscriptionStatusActive = domain.SubscriptionStatusActive + SubscriptionStatusExpired = domain.SubscriptionStatusExpired + SubscriptionStatusSuspended = domain.SubscriptionStatusSuspended ) // LinuxDoConnectSyntheticEmailDomain 是 LinuxDo Connect 用户的合成邮箱后缀(RFC 保留域名)。 @@ -69,8 +71,10 @@ const LinuxDoConnectSyntheticEmailDomain = "@linuxdo-connect.invalid" // Setting keys const ( // 注册设置 - SettingKeyRegistrationEnabled = "registration_enabled" // 是否开放注册 - SettingKeyEmailVerifyEnabled = "email_verify_enabled" // 是否开启邮件验证 + SettingKeyRegistrationEnabled = "registration_enabled" // 是否开放注册 + SettingKeyEmailVerifyEnabled = "email_verify_enabled" // 是否开启邮件验证 + SettingKeyPromoCodeEnabled = "promo_code_enabled" // 是否启用优惠码功能 + SettingKeyPasswordResetEnabled = "password_reset_enabled" // 是否启用忘记密码功能(需要先开启邮件验证) // 邮件服务设置 SettingKeySMTPHost = "smtp_host" // SMTP服务器地址 @@ -86,6 +90,9 @@ const ( SettingKeyTurnstileSiteKey = "turnstile_site_key" // Turnstile Site Key SettingKeyTurnstileSecretKey = "turnstile_secret_key" // Turnstile Secret Key + // TOTP 双因素认证设置 + SettingKeyTotpEnabled = "totp_enabled" // 是否启用 TOTP 2FA 功能 + // LinuxDo Connect OAuth 登录设置 SettingKeyLinuxDoConnectEnabled = "linuxdo_connect_enabled" SettingKeyLinuxDoConnectClientID = "linuxdo_connect_client_id" @@ -93,13 +100,16 @@ const ( SettingKeyLinuxDoConnectRedirectURL = "linuxdo_connect_redirect_url" // OEM设置 - SettingKeySiteName = "site_name" // 网站名称 - SettingKeySiteLogo = "site_logo" // 网站Logo (base64) - SettingKeySiteSubtitle = "site_subtitle" // 网站副标题 - SettingKeyAPIBaseURL = "api_base_url" // API端点地址(用于客户端配置和导入) - SettingKeyContactInfo = "contact_info" // 客服联系方式 - SettingKeyDocURL = "doc_url" // 文档链接 - SettingKeyHomeContent = "home_content" // 首页内容(支持 Markdown/HTML,或 URL 作为 iframe src) + SettingKeySiteName = "site_name" // 网站名称 + SettingKeySiteLogo = "site_logo" // 网站Logo 
(base64) + SettingKeySiteSubtitle = "site_subtitle" // 网站副标题 + SettingKeyAPIBaseURL = "api_base_url" // API端点地址(用于客户端配置和导入) + SettingKeyContactInfo = "contact_info" // 客服联系方式 + SettingKeyDocURL = "doc_url" // 文档链接 + SettingKeyHomeContent = "home_content" // 首页内容(支持 Markdown/HTML,或 URL 作为 iframe src) + SettingKeyHideCcsImportButton = "hide_ccs_import_button" // 是否隐藏 API Keys 页面的导入 CCS 按钮 + SettingKeyPurchaseSubscriptionEnabled = "purchase_subscription_enabled" // 是否展示“购买订阅”页面入口 + SettingKeyPurchaseSubscriptionURL = "purchase_subscription_url" // “购买订阅”页面 URL(作为 iframe src) // 默认配置 SettingKeyDefaultConcurrency = "default_concurrency" // 新用户默认并发量 diff --git a/backend/internal/service/email_queue_service.go b/backend/internal/service/email_queue_service.go index 1c22702c..6c975c69 100644 --- a/backend/internal/service/email_queue_service.go +++ b/backend/internal/service/email_queue_service.go @@ -8,11 +8,18 @@ import ( "time" ) +// Task type constants +const ( + TaskTypeVerifyCode = "verify_code" + TaskTypePasswordReset = "password_reset" +) + // EmailTask 邮件发送任务 type EmailTask struct { Email string SiteName string - TaskType string // "verify_code" + TaskType string // "verify_code" or "password_reset" + ResetURL string // Only used for password_reset task type } // EmailQueueService 异步邮件队列服务 @@ -73,12 +80,18 @@ func (s *EmailQueueService) processTask(workerID int, task EmailTask) { defer cancel() switch task.TaskType { - case "verify_code": + case TaskTypeVerifyCode: if err := s.emailService.SendVerifyCode(ctx, task.Email, task.SiteName); err != nil { log.Printf("[EmailQueue] Worker %d failed to send verify code to %s: %v", workerID, task.Email, err) } else { log.Printf("[EmailQueue] Worker %d sent verify code to %s", workerID, task.Email) } + case TaskTypePasswordReset: + if err := s.emailService.SendPasswordResetEmailWithCooldown(ctx, task.Email, task.SiteName, task.ResetURL); err != nil { + log.Printf("[EmailQueue] Worker %d failed to send password reset to %s: %v", workerID, task.Email, err) + } else { + log.Printf("[EmailQueue] Worker %d sent password reset to %s", workerID, task.Email) + } default: log.Printf("[EmailQueue] Worker %d unknown task type: %s", workerID, task.TaskType) } @@ -89,7 +102,7 @@ func (s *EmailQueueService) EnqueueVerifyCode(email, siteName string) error { task := EmailTask{ Email: email, SiteName: siteName, - TaskType: "verify_code", + TaskType: TaskTypeVerifyCode, } select { @@ -101,6 +114,24 @@ func (s *EmailQueueService) EnqueueVerifyCode(email, siteName string) error { } } +// EnqueuePasswordReset 将密码重置邮件任务加入队列 +func (s *EmailQueueService) EnqueuePasswordReset(email, siteName, resetURL string) error { + task := EmailTask{ + Email: email, + SiteName: siteName, + TaskType: TaskTypePasswordReset, + ResetURL: resetURL, + } + + select { + case s.taskChan <- task: + log.Printf("[EmailQueue] Enqueued password reset task for %s", email) + return nil + default: + return fmt.Errorf("email queue is full") + } +} + // Stop 停止队列服务 func (s *EmailQueueService) Stop() { close(s.stopChan) diff --git a/backend/internal/service/email_service.go b/backend/internal/service/email_service.go index 55e137d6..44edf7f7 100644 --- a/backend/internal/service/email_service.go +++ b/backend/internal/service/email_service.go @@ -3,11 +3,14 @@ package service import ( "context" "crypto/rand" + "crypto/subtle" "crypto/tls" + "encoding/hex" "fmt" "log" "math/big" "net/smtp" + "net/url" "strconv" "time" @@ -19,6 +22,9 @@ var ( ErrInvalidVerifyCode = 
infraerrors.BadRequest("INVALID_VERIFY_CODE", "invalid or expired verification code") ErrVerifyCodeTooFrequent = infraerrors.TooManyRequests("VERIFY_CODE_TOO_FREQUENT", "please wait before requesting a new code") ErrVerifyCodeMaxAttempts = infraerrors.TooManyRequests("VERIFY_CODE_MAX_ATTEMPTS", "too many failed attempts, please request a new code") + + // Password reset errors + ErrInvalidResetToken = infraerrors.BadRequest("INVALID_RESET_TOKEN", "invalid or expired password reset token") ) // EmailCache defines cache operations for email service @@ -26,6 +32,16 @@ type EmailCache interface { GetVerificationCode(ctx context.Context, email string) (*VerificationCodeData, error) SetVerificationCode(ctx context.Context, email string, data *VerificationCodeData, ttl time.Duration) error DeleteVerificationCode(ctx context.Context, email string) error + + // Password reset token methods + GetPasswordResetToken(ctx context.Context, email string) (*PasswordResetTokenData, error) + SetPasswordResetToken(ctx context.Context, email string, data *PasswordResetTokenData, ttl time.Duration) error + DeletePasswordResetToken(ctx context.Context, email string) error + + // Password reset email cooldown methods + // Returns true if in cooldown period (email was sent recently) + IsPasswordResetEmailInCooldown(ctx context.Context, email string) bool + SetPasswordResetEmailCooldown(ctx context.Context, email string, ttl time.Duration) error } // VerificationCodeData represents verification code data @@ -35,10 +51,22 @@ type VerificationCodeData struct { CreatedAt time.Time } +// PasswordResetTokenData represents password reset token data +type PasswordResetTokenData struct { + Token string + CreatedAt time.Time +} + const ( verifyCodeTTL = 15 * time.Minute verifyCodeCooldown = 1 * time.Minute maxVerifyCodeAttempts = 5 + + // Password reset token settings + passwordResetTokenTTL = 30 * time.Minute + + // Password reset email cooldown (prevent email bombing) + passwordResetEmailCooldown = 30 * time.Second ) // SMTPConfig SMTP配置 @@ -254,8 +282,8 @@ func (s *EmailService) VerifyCode(ctx context.Context, email, code string) error return ErrVerifyCodeMaxAttempts } - // 验证码不匹配 - if data.Code != code { + // 验证码不匹配 (constant-time comparison to prevent timing attacks) + if subtle.ConstantTimeCompare([]byte(data.Code), []byte(code)) != 1 { data.Attempts++ if err := s.cache.SetVerificationCode(ctx, email, data, verifyCodeTTL); err != nil { log.Printf("[Email] Failed to update verification attempt count: %v", err) @@ -357,3 +385,157 @@ func (s *EmailService) TestSMTPConnectionWithConfig(config *SMTPConfig) error { return client.Quit() } + +// GeneratePasswordResetToken generates a secure 32-byte random token (64 hex characters) +func (s *EmailService) GeneratePasswordResetToken() (string, error) { + bytes := make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// SendPasswordResetEmail sends a password reset email with a reset link +func (s *EmailService) SendPasswordResetEmail(ctx context.Context, email, siteName, resetURL string) error { + var token string + var needSaveToken bool + + // Check if token already exists + existing, err := s.cache.GetPasswordResetToken(ctx, email) + if err == nil && existing != nil { + // Token exists, reuse it (allows resending email without generating new token) + token = existing.Token + needSaveToken = false + } else { + // Generate new token + token, err = s.GeneratePasswordResetToken() + if err != nil { + 
return fmt.Errorf("generate token: %w", err) + } + needSaveToken = true + } + + // Save token to Redis (only if new token generated) + if needSaveToken { + data := &PasswordResetTokenData{ + Token: token, + CreatedAt: time.Now(), + } + if err := s.cache.SetPasswordResetToken(ctx, email, data, passwordResetTokenTTL); err != nil { + return fmt.Errorf("save reset token: %w", err) + } + } + + // Build full reset URL with URL-encoded token and email + fullResetURL := fmt.Sprintf("%s?email=%s&token=%s", resetURL, url.QueryEscape(email), url.QueryEscape(token)) + + // Build email content + subject := fmt.Sprintf("[%s] 密码重置请求", siteName) + body := s.buildPasswordResetEmailBody(fullResetURL, siteName) + + // Send email + if err := s.SendEmail(ctx, email, subject, body); err != nil { + return fmt.Errorf("send email: %w", err) + } + + return nil +} + +// SendPasswordResetEmailWithCooldown sends password reset email with cooldown check (called by queue worker) +// This method wraps SendPasswordResetEmail with email cooldown to prevent email bombing +func (s *EmailService) SendPasswordResetEmailWithCooldown(ctx context.Context, email, siteName, resetURL string) error { + // Check email cooldown to prevent email bombing + if s.cache.IsPasswordResetEmailInCooldown(ctx, email) { + log.Printf("[Email] Password reset email skipped (cooldown): %s", email) + return nil // Silent success to prevent revealing cooldown to attackers + } + + // Send email using core method + if err := s.SendPasswordResetEmail(ctx, email, siteName, resetURL); err != nil { + return err + } + + // Set cooldown marker (Redis TTL handles expiration) + if err := s.cache.SetPasswordResetEmailCooldown(ctx, email, passwordResetEmailCooldown); err != nil { + log.Printf("[Email] Failed to set password reset cooldown for %s: %v", email, err) + } + + return nil +} + +// VerifyPasswordResetToken verifies the password reset token without consuming it +func (s *EmailService) VerifyPasswordResetToken(ctx context.Context, email, token string) error { + data, err := s.cache.GetPasswordResetToken(ctx, email) + if err != nil || data == nil { + return ErrInvalidResetToken + } + + // Use constant-time comparison to prevent timing attacks + if subtle.ConstantTimeCompare([]byte(data.Token), []byte(token)) != 1 { + return ErrInvalidResetToken + } + + return nil +} + +// ConsumePasswordResetToken verifies and deletes the token (one-time use) +func (s *EmailService) ConsumePasswordResetToken(ctx context.Context, email, token string) error { + // Verify first + if err := s.VerifyPasswordResetToken(ctx, email, token); err != nil { + return err + } + + // Delete after verification (one-time use) + if err := s.cache.DeletePasswordResetToken(ctx, email); err != nil { + log.Printf("[Email] Failed to delete password reset token after consumption: %v", err) + } + return nil +} + +// buildPasswordResetEmailBody builds the HTML content for password reset email +func (s *EmailService) buildPasswordResetEmailBody(resetURL, siteName string) string { + return fmt.Sprintf(` + + + + + + + +
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="UTF-8">
+</head>
+<body style="margin:0;padding:0;background-color:#f4f4f7;font-family:Arial,Helvetica,sans-serif;">
+  <div style="max-width:560px;margin:0 auto;padding:24px;">
+    <div style="text-align:center;padding:16px 0;font-size:20px;font-weight:bold;color:#333;">
+      %s
+    </div>
+    <div style="background-color:#ffffff;border-radius:8px;padding:32px;">
+      <h2 style="margin-top:0;color:#333;">密码重置请求</h2>
+      <p style="color:#555;line-height:1.6;">您已请求重置密码。请点击下方按钮设置新密码:</p>
+      <div style="text-align:center;margin:28px 0;">
+        <a href="%s" style="display:inline-block;padding:12px 32px;background-color:#4f46e5;color:#ffffff;text-decoration:none;border-radius:6px;">重置密码</a>
+      </div>
+      <p style="color:#888;font-size:13px;"><a href="%s" style="color:#888;">此链接</a>将在 30 分钟后失效。</p>
+      <p style="color:#888;font-size:13px;">如果您没有请求重置密码,请忽略此邮件。您的密码将保持不变。</p>
+    </div>
+  </div>
+</body>
+</html>
+ + +`, siteName, resetURL, resetURL) +} diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index ccae80fe..26eb24e4 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -96,9 +96,6 @@ func (m *mockAccountRepoForPlatform) ListActive(ctx context.Context) ([]Account, func (m *mockAccountRepoForPlatform) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { return nil, nil } -func (m *mockAccountRepoForPlatform) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) { - return nil, nil -} func (m *mockAccountRepoForPlatform) UpdateLastUsed(ctx context.Context, id int64) error { return nil } @@ -185,6 +182,7 @@ var _ AccountRepository = (*mockAccountRepoForPlatform)(nil) // mockGatewayCacheForPlatform 单平台测试用的 cache mock type mockGatewayCacheForPlatform struct { sessionBindings map[string]int64 + deletedSessions map[string]int } func (m *mockGatewayCacheForPlatform) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) { @@ -206,6 +204,18 @@ func (m *mockGatewayCacheForPlatform) RefreshSessionTTL(ctx context.Context, gro return nil } +func (m *mockGatewayCacheForPlatform) DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error { + if m.sessionBindings == nil { + return nil + } + if m.deletedSessions == nil { + m.deletedSessions = make(map[string]int) + } + m.deletedSessions[sessionHash]++ + delete(m.sessionBindings, sessionHash) + return nil +} + type mockGroupRepoForGateway struct { groups map[int64]*Group getByIDCalls int @@ -629,6 +639,363 @@ func TestGatewayService_SelectAccountForModelWithPlatform_StickySession(t *testi }) } +func TestGatewayService_SelectAccountForModelWithExclusions_ForcePlatform(t *testing.T) { + ctx := context.Background() + ctx = context.WithValue(ctx, ctxkey.ForcePlatform, PlatformAntigravity) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, PlatformAntigravity, acc.Platform) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_RoutedStickySessionClears(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + requestedModel := "claude-3-5-sonnet-20241022" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusDisabled, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + groupRepo := 
&mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-group", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {1, 2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, &groupID, "session-123", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, 1, cache.deletedSessions["session-123"]) + require.Equal(t, int64(2), cache.sessionBindings["session-123"]) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_RoutedStickySessionHit(t *testing.T) { + ctx := context.Background() + groupID := int64(11) + requestedModel := "claude-3-5-sonnet-20241022" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-456": 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-group-hit", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {1, 2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, &groupID, "session-456", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_RoutedFallbackToNormal(t *testing.T) { + ctx := context.Background() + groupID := int64(12) + requestedModel := "claude-3-5-sonnet-20241022" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-fallback", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {99}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, &groupID, "", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_NoModelSupport(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + { + ID: 1, + 
Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-haiku-20241022": "claude-3-5-haiku-20241022"}}, + }, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "supporting model") +} + +func TestGatewayService_SelectAccountForModelWithPlatform_GeminiPreferOAuth(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeAPIKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_StickyInGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(50) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-group": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, &groupID, "session-group", "", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_StickyModelMismatchFallback(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-haiku-20241022": "claude-3-5-haiku-20241022"}}, + }, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-miss": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := 
svc.selectAccountForModelWithPlatform(ctx, nil, "session-miss", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_PreferNeverUsed(t *testing.T) { + ctx := context.Background() + lastUsed := time.Now().Add(-1 * time.Hour) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: &lastUsed}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGatewayService_SelectAccountForModelWithPlatform_NoAccounts(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForPlatform{ + accounts: []Account{}, + accountsByID: map[int64]*Account{}, + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "", nil, PlatformAnthropic) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "no available accounts") +} + func TestGatewayService_isModelSupportedByAccount(t *testing.T) { svc := &GatewayService{} @@ -746,6 +1113,301 @@ func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { require.Equal(t, int64(2), acc.ID, "应选择优先级最高的账户(包含启用混合调度的antigravity)") }) + t.Run("混合调度-路由优先选择路由账号", func(t *testing.T) { + groupID := int64(30) + requestedModel := "claude-3-5-sonnet-20241022" + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed-select", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + }) + + t.Run("混合调度-路由粘性命中", func(t *testing.T) { + groupID := int64(31) + requestedModel := "claude-3-5-sonnet-20241022" + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: 
StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-777": 2}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed-sticky", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "session-777", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + }) + + t.Run("混合调度-路由账号缺失回退", func(t *testing.T) { + groupID := int64(32) + requestedModel := "claude-3-5-sonnet-20241022" + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed-miss", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {99}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) + }) + + t.Run("混合调度-路由账号未启用mixed_scheduling回退", func(t *testing.T) { + groupID := int64(33) + requestedModel := "claude-3-5-sonnet-20241022" + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true}, // 未启用 mixed_scheduling + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed-disabled", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) + }) + + t.Run("混合调度-路由过滤覆盖", func(t *testing.T) { + groupID := 
int64(35) + requestedModel := "claude-3-5-sonnet-20241022" + resetAt := time.Now().Add(10 * time.Minute) + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: false}, + {ID: 3, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, + { + ID: 4, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Extra: map[string]any{ + "model_rate_limits": map[string]any{ + "claude_sonnet": map[string]any{ + "rate_limit_reset_at": resetAt.Format(time.RFC3339), + }, + }, + }, + }, + { + ID: 5, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-haiku-20241022": "claude-3-5-haiku-20241022"}}, + }, + {ID: 6, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 7, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed-filter", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {1, 2, 3, 4, 5, 6, 7}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + excluded := map[int64]struct{}{1: {}} + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "", requestedModel, excluded, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(7), acc.ID) + }) + + t.Run("混合调度-粘性命中分组账号", func(t *testing.T) { + groupID := int64(34) + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-group": 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "session-group", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) + }) + t.Run("混合调度-过滤未启用mixed_scheduling的antigravity账户", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{ @@ -829,6 +1491,85 @@ func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { require.Equal(t, int64(1), acc.ID, "粘性会话绑定的账户未启用mixed_scheduling,应降级选择anthropic账户") }) + 
t.Run("混合调度-粘性会话不可调度-清理并回退", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusDisabled, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, 1, cache.deletedSessions["session-123"]) + require.Equal(t, int64(2), cache.sessionBindings["session-123"]) + }) + + t.Run("混合调度-路由粘性不可调度-清理并回退", func(t *testing.T) { + groupID := int64(12) + requestedModel := "claude-3-5-sonnet-20241022" + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusDisabled, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Name: "route-mixed", + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + requestedModel: {1, 2}, + }, + }, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + groupRepo: groupRepo, + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, &groupID, "session-123", requestedModel, nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, 1, cache.deletedSessions["session-123"]) + require.Equal(t, int64(2), cache.sessionBindings["session-123"]) + }) + t.Run("混合调度-仅有启用mixed_scheduling的antigravity账户", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{ @@ -879,6 +1620,65 @@ func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { require.Nil(t, acc) require.Contains(t, err.Error(), "no available accounts") }) + + t.Run("混合调度-不支持模型返回错误", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-haiku-20241022": "claude-3-5-haiku-20241022"}}, + }, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.Error(t, err) + 
require.Nil(t, acc) + require.Contains(t, err.Error(), "supporting model") + }) + + t.Run("混合调度-优先未使用账号", func(t *testing.T) { + lastUsed := time.Now().Add(-2 * time.Hour) + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: &lastUsed}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + }) } // TestAccount_IsMixedSchedulingEnabled 测试混合调度开关检查 @@ -965,10 +1765,20 @@ func (m *mockConcurrencyService) GetAccountWaitingCount(ctx context.Context, acc type mockConcurrencyCache struct { acquireAccountCalls int loadBatchCalls int + acquireResults map[int64]bool + loadBatchErr error + loadMap map[int64]*AccountLoadInfo + waitCounts map[int64]int + skipDefaultLoad bool } func (m *mockConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { m.acquireAccountCalls++ + if m.acquireResults != nil { + if result, ok := m.acquireResults[accountID]; ok { + return result, nil + } + } return true, nil } @@ -989,6 +1799,11 @@ func (m *mockConcurrencyCache) DecrementAccountWaitCount(ctx context.Context, ac } func (m *mockConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if m.waitCounts != nil { + if count, ok := m.waitCounts[accountID]; ok { + return count, nil + } + } return 0, nil } @@ -1014,8 +1829,25 @@ func (m *mockConcurrencyCache) DecrementWaitCount(ctx context.Context, userID in func (m *mockConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { m.loadBatchCalls++ + if m.loadBatchErr != nil { + return nil, m.loadBatchErr + } result := make(map[int64]*AccountLoadInfo, len(accounts)) + if m.skipDefaultLoad && m.loadMap != nil { + for _, acc := range accounts { + if load, ok := m.loadMap[acc.ID]; ok { + result[acc.ID] = load + } + } + return result, nil + } for _, acc := range accounts { + if m.loadMap != nil { + if load, ok := m.loadMap[acc.ID]; ok { + result[acc.ID] = load + continue + } + } result[acc.ID] = &AccountLoadInfo{ AccountID: acc.ID, CurrentConcurrency: 0, @@ -1254,6 +2086,48 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { require.Equal(t, 1, concurrencyCache.loadBatchCalls, "应继续进行负载批量查询") }) + t.Run("粘性账号禁用-清理会话并回退选择", func(t *testing.T) { + testCtx := context.WithValue(ctx, ctxkey.ForcePlatform, PlatformAnthropic) + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: false, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + repo.listPlatformFunc = func(ctx context.Context, platform string) ([]Account, error) { + return 
repo.accounts, nil + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(testCtx, nil, "sticky", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "粘性账号禁用时应回退到可用账号") + updatedID, ok := cache.sessionBindings["sticky"] + require.True(t, ok, "粘性会话应更新绑定") + require.Equal(t, int64(2), updatedID, "粘性会话应绑定到新账号") + }) + t.Run("无可用账号-返回错误", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{}, @@ -1343,6 +2217,751 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { require.NotNil(t, result.Account) require.Equal(t, int64(2), result.Account.ID, "应跳过过载账号,选择可用账号") }) + + t.Run("粘性账号槽位满-返回粘性等待计划", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + cfg.Gateway.Scheduling.StickySessionMaxWaiting = 1 + + concurrencyCache := &mockConcurrencyCache{ + acquireResults: map[int64]bool{1: false}, + waitCounts: map[int64]int{1: 0}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "sticky", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.WaitPlan) + require.Equal(t, int64(1), result.Account.ID) + require.Equal(t, 0, concurrencyCache.loadBatchCalls) + }) + + t.Run("负载批量查询失败-降级旧顺序选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadBatchErr: errors.New("load batch failed"), + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "legacy", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID) + require.Equal(t, int64(2), cache.sessionBindings["legacy"]) + }) + + t.Run("模型路由-粘性账号等待计划", func(t *testing.T) { + groupID := int64(20) + sessionHash := "route-sticky" + + repo := 
&mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{sessionHash: 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + cfg.Gateway.Scheduling.StickySessionMaxWaiting = 1 + + concurrencyCache := &mockConcurrencyCache{ + acquireResults: map[int64]bool{1: false}, + waitCounts: map[int64]int{1: 0}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, sessionHash, "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.WaitPlan) + require.Equal(t, int64(1), result.Account.ID) + }) + + t.Run("模型路由-粘性账号命中", func(t *testing.T) { + groupID := int64(20) + sessionHash := "route-hit" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{sessionHash: 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, sessionHash, "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID) + require.Equal(t, 0, concurrencyCache.loadBatchCalls) + }) + + t.Run("模型路由-粘性账号缺失-清理并回退", func(t *testing.T) { + groupID := int64(22) + sessionHash := "route-missing" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{sessionHash: 
1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, sessionHash, "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID) + require.Equal(t, 1, cache.deletedSessions[sessionHash]) + require.Equal(t, int64(2), cache.sessionBindings[sessionHash]) + }) + + t.Run("模型路由-按负载选择账号", func(t *testing.T) { + groupID := int64(21) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 80}, + 2: {AccountID: 2, LoadRate: 20}, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "route", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID) + require.Equal(t, int64(2), cache.sessionBindings["route"]) + }) + + t.Run("模型路由-路由账号全满返回等待计划", func(t *testing.T) { + groupID := int64(23) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + acquireResults: map[int64]bool{1: false, 
2: false}, + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 10}, + 2: {AccountID: 2, LoadRate: 20}, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "route-full", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.WaitPlan) + require.Equal(t, int64(1), result.Account.ID) + }) + + t.Run("模型路由-路由账号全满-回退普通选择", func(t *testing.T) { + groupID := int64(22) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 3, Platform: PlatformAnthropic, Priority: 0, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 100}, + 2: {AccountID: 2, LoadRate: 100}, + 3: {AccountID: 3, LoadRate: 0}, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "fallback", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(3), result.Account.ID) + require.Equal(t, int64(3), cache.sessionBindings["fallback"]) + }) + + t.Run("负载批量失败且无法获取-兜底等待", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadBatchErr: errors.New("load batch failed"), + acquireResults: map[int64]bool{1: false, 2: false}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.WaitPlan) + require.Equal(t, int64(1), result.Account.ID) + }) + + t.Run("Gemini负载排序-优先OAuth", func(t *testing.T) { + groupID := int64(24) + + repo := &mockAccountRepoForPlatform{ + 
accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, Type: AccountTypeAPIKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformGemini, + Status: StatusActive, + Hydrated: true, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 10}, + 2: {AccountID: 2, LoadRate: 10}, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "gemini", "gemini-2.5-pro", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID) + }) + + t.Run("模型路由-过滤路径覆盖", func(t *testing.T) { + groupID := int64(70) + now := time.Now().Add(10 * time.Minute) + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 3, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: false, Concurrency: 5}, + {ID: 4, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + { + ID: 5, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Concurrency: 5, + Extra: map[string]any{ + "model_rate_limits": map[string]any{ + "claude_sonnet": map[string]any{ + "rate_limit_reset_at": now.Format(time.RFC3339), + }, + }, + }, + }, + { + ID: 6, + Platform: PlatformAnthropic, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Concurrency: 5, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-haiku-20241022": "claude-3-5-haiku-20241022"}}, + }, + {ID: 7, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-3-5-sonnet-20241022": {1, 2, 3, 4, 5, 6}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + excluded := map[int64]struct{}{1: {}} + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "", "claude-3-5-sonnet-20241022", excluded, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(7), result.Account.ID) + }) 
+ + t.Run("ClaudeCode限制-回退分组", func(t *testing.T) { + groupID := int64(60) + fallbackID := int64(61) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ClaudeCodeOnly: true, + FallbackGroupID: func() *int64 { + v := fallbackID + return &v + }(), + }, + fallbackID: { + ID: fallbackID, + Platform: PlatformGemini, + Status: StatusActive, + Hydrated: true, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: &mockGatewayCacheForPlatform{}, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "", "gemini-2.5-pro", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID) + }) + + t.Run("ClaudeCode限制-无降级返回错误", func(t *testing.T) { + groupID := int64(62) + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ClaudeCodeOnly: true, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: &mockAccountRepoForPlatform{}, + groupRepo: groupRepo, + cache: &mockGatewayCacheForPlatform{}, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil, "") + require.Error(t, err) + require.Nil(t, result) + require.ErrorIs(t, err, ErrClaudeCodeOnly) + }) + + t.Run("负载可用但无法获取槽位-兜底等待", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + acquireResults: map[int64]bool{1: false, 2: false}, + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 10}, + 2: {AccountID: 2, LoadRate: 20}, + }, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "wait", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.WaitPlan) + require.Equal(t, int64(1), result.Account.ID) + }) + + t.Run("负载信息缺失-使用默认负载", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, 
Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 50}, + }, + skipDefaultLoad: true, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "missing-load", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID) + }) } func TestGatewayService_GroupResolution_ReusesContextGroup(t *testing.T) { diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index a7ded8a9..0fb9eced 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "log" + "log/slog" mathrand "math/rand" "net/http" "os" @@ -113,11 +114,24 @@ var allowedHeaders = map[string]bool{ "content-type": true, } -// GatewayCache defines cache operations for gateway service +// GatewayCache 定义网关服务的缓存操作接口。 +// 提供粘性会话(Sticky Session)的存储、查询、刷新和删除功能。 +// +// GatewayCache defines cache operations for gateway service. +// Provides sticky session storage, retrieval, refresh and deletion capabilities. type GatewayCache interface { + // GetSessionAccountID 获取粘性会话绑定的账号 ID + // Get the account ID bound to a sticky session GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) + // SetSessionAccountID 设置粘性会话与账号的绑定关系 + // Set the binding between sticky session and account SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error + // RefreshSessionTTL 刷新粘性会话的过期时间 + // Refresh the expiration time of a sticky session RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error + // DeleteSessionAccountID 删除粘性会话绑定,用于账号不可用时主动清理 + // Delete sticky session binding, used to proactively clean up when account becomes unavailable + DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error } // derefGroupID safely dereferences *int64 to int64, returning 0 if nil @@ -128,6 +142,28 @@ func derefGroupID(groupID *int64) int64 { return *groupID } +// shouldClearStickySession 检查账号是否处于不可调度状态,需要清理粘性会话绑定。 +// 当账号状态为错误、禁用、不可调度,或处于临时不可调度期间时,返回 true。 +// 这确保后续请求不会继续使用不可用的账号。 +// +// shouldClearStickySession checks if an account is in an unschedulable state +// and the sticky session binding should be cleared. +// Returns true when account status is error/disabled, schedulable is false, +// or within temporary unschedulable period. +// This ensures subsequent requests won't continue using unavailable accounts. 
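+//
+// Illustrative truth table (a sketch; only the fields shown are assumed set):
+//
+//	shouldClearStickySession(nil)                                                // false: nothing to clear
+//	shouldClearStickySession(&Account{Status: StatusDisabled})                   // true: disabled account
+//	shouldClearStickySession(&Account{Status: StatusActive, Schedulable: true})  // false: still schedulable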
+func shouldClearStickySession(account *Account) bool { + if account == nil { + return false + } + if account.Status == StatusError || account.Status == StatusDisabled || !account.Schedulable { + return true + } + if account.TempUnschedulableUntil != nil && time.Now().Before(*account.TempUnschedulableUntil) { + return true + } + return false +} + type AccountWaitPlan struct { AccountID int64 MaxConcurrency int @@ -284,6 +320,19 @@ func (s *GatewayService) BindStickySession(ctx context.Context, groupID *int64, return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, accountID, stickySessionTTL) } +// GetCachedSessionAccountID retrieves the account ID bound to a sticky session. +// Returns 0 if no binding exists or on error. +func (s *GatewayService) GetCachedSessionAccountID(ctx context.Context, groupID *int64, sessionHash string) (int64, error) { + if sessionHash == "" || s.cache == nil { + return 0, nil + } + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + if err != nil { + return 0, err + } + return accountID, nil +} + func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string { if parsed == nil { return "" @@ -426,11 +475,20 @@ func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context } // SelectAccountWithLoadAwareness selects account with load-awareness and wait plan. -// metadataUserID: 原始 metadata.user_id 字段(用于提取会话 UUID 进行会话数量限制) +// metadataUserID: 已废弃参数,会话限制现在统一使用 sessionHash func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, metadataUserID string) (*AccountSelectionResult, error) { + // 调试日志:记录调度入口参数 + excludedIDsList := make([]int64, 0, len(excludedIDs)) + for id := range excludedIDs { + excludedIDsList = append(excludedIDsList, id) + } + slog.Debug("account_scheduling_starting", + "group_id", derefGroupID(groupID), + "model", requestedModel, + "session", shortSessionHash(sessionHash), + "excluded_ids", excludedIDsList) + cfg := s.schedulingConfig() - // 提取会话 UUID(用于会话数量限制) - sessionUUID := extractSessionUUID(metadataUserID) var stickyAccountID int64 if sessionHash != "" && s.cache != nil { @@ -456,41 +514,63 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro } if s.concurrencyService == nil || !cfg.LoadBatchEnabled { - account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) - if err != nil { - return nil, err + // 复制排除列表,用于会话限制拒绝时的重试 + localExcluded := make(map[int64]struct{}) + for k, v := range excludedIDs { + localExcluded[k] = v } - result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) - if err == nil && result.Acquired { - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) - if waitingCount < cfg.StickySessionMaxWaiting { + + for { + account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, localExcluded) + if err != nil { + return nil, err + } + + result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err == nil && result.Acquired { + // 获取槽位后检查会话限制(使用 sessionHash 作为会话标识符) + if !s.checkAndRegisterSession(ctx, account, sessionHash) { + 
result.ReleaseFunc() // 释放槽位 + localExcluded[account.ID] = struct{}{} // 排除此账号 + continue // 重新选择 + } return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, }, nil } + + // 对于等待计划的情况,也需要先检查会话限制 + if !s.checkAndRegisterSession(ctx, account, sessionHash) { + localExcluded[account.ID] = struct{}{} + continue + } + + if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, nil } - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.FallbackWaitTimeout, - MaxWaiting: cfg.FallbackMaxWaiting, - }, - }, nil } platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID, group) @@ -606,7 +686,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro result, err := s.tryAcquireAccountSlot(ctx, stickyAccountID, stickyAccount.Concurrency) if err == nil && result.Acquired { // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, stickyAccount, sessionUUID) { + if !s.checkAndRegisterSession(ctx, stickyAccount, sessionHash) { result.ReleaseFunc() // 释放槽位 // 继续到负载感知选择 } else { @@ -624,18 +704,25 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, stickyAccountID) if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: stickyAccount, - WaitPlan: &AccountWaitPlan{ - AccountID: stickyAccountID, - MaxConcurrency: stickyAccount.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil + // 会话数量限制检查(等待计划也需要占用会话配额) + if !s.checkAndRegisterSession(ctx, stickyAccount, sessionHash) { + // 会话限制已满,继续到负载感知选择 + } else { + return &AccountSelectionResult{ + Account: stickyAccount, + WaitPlan: &AccountWaitPlan{ + AccountID: stickyAccountID, + MaxConcurrency: stickyAccount.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } } // 粘性账号槽位满且等待队列已满,继续使用负载感知选择 } + } else { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) } } } @@ -693,7 +780,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) if err == nil && result.Acquired { // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, item.account, sessionUUID) { + if !s.checkAndRegisterSession(ctx, item.account, sessionHash) { result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 continue } @@ -711,20 +798,26 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, 
gro } } - // 5. 所有路由账号槽位满,返回等待计划(选择负载最低的) - acc := routingAvailable[0].account - if s.debugModelRoutingEnabled() { - log.Printf("[ModelRoutingDebug] routed wait: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), acc.ID) + // 5. 所有路由账号槽位满,尝试返回等待计划(选择负载最低的) + // 遍历找到第一个满足会话限制的账号 + for _, item := range routingAvailable { + if !s.checkAndRegisterSession(ctx, item.account, sessionHash) { + continue // 会话限制已满,尝试下一个 + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routed wait: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), item.account.ID) + } + return &AccountSelectionResult{ + Account: item.account, + WaitPlan: &AccountWaitPlan{ + AccountID: item.account.ID, + MaxConcurrency: item.account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil } - return &AccountSelectionResult{ - Account: acc, - WaitPlan: &AccountWaitPlan{ - AccountID: acc.ID, - MaxConcurrency: acc.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil + // 所有路由账号会话限制都已满,继续到 Layer 2 回退 } // 路由列表中的账号都不可用(负载率 >= 100),继续到 Layer 2 回退 log.Printf("[ModelRouting] All routed accounts unavailable for model=%s, falling back to normal selection", requestedModel) @@ -736,37 +829,53 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) if err == nil && accountID > 0 && !isExcluded(accountID) { account, ok := accountByID[accountID] - if ok && s.isAccountInGroup(account, groupID) && - s.isAccountAllowedForPlatform(account, platform, useMixed) && - account.IsSchedulableForModel(requestedModel) && - (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) && - s.isAccountSchedulableForWindowCost(ctx, account, true) { // 粘性会话窗口费用检查 - result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) - if err == nil && result.Acquired { - // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, account, sessionUUID) { - result.ReleaseFunc() // 释放槽位,继续到 Layer 2 - } else { - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL) - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } + if ok { + // 检查账户是否需要清理粘性会话绑定 + // Check if the account needs sticky session cleanup + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) } + if !clearSticky && s.isAccountInGroup(account, groupID) && + s.isAccountAllowedForPlatform(account, platform, useMixed) && + account.IsSchedulableForModel(requestedModel) && + (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) && + s.isAccountSchedulableForWindowCost(ctx, account, true) { // 粘性会话窗口费用检查 + result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if err == nil && result.Acquired { + // 会话数量限制检查 + // Session count limit check + if !s.checkAndRegisterSession(ctx, account, sessionHash) { + result.ReleaseFunc() // 释放槽位,继续到 Layer 2 + } else { + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } - waitingCount, _ := 
s.concurrencyService.GetAccountWaitingCount(ctx, accountID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: accountID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + // 会话数量限制检查(等待计划也需要占用会话配额) + // Session count limit check (wait plan also requires session quota) + if !s.checkAndRegisterSession(ctx, account, sessionHash) { + // 会话限制已满,继续到 Layer 2 + // Session limit full, continue to Layer 2 + } else { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } } } } @@ -815,7 +924,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) if err != nil { - if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, groupID, sessionHash, preferOAuth, sessionUUID); ok { + if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, groupID, sessionHash, preferOAuth); ok { return result, nil } } else { @@ -865,7 +974,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) if err == nil && result.Acquired { // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, item.account, sessionUUID) { + if !s.checkAndRegisterSession(ctx, item.account, sessionHash) { result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 continue } @@ -885,6 +994,10 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro // ============ Layer 3: 兜底排队 ============ s.sortCandidatesForFallback(candidates, preferOAuth, cfg.FallbackSelectionMode) for _, acc := range candidates { + // 会话数量限制检查(等待计划也需要占用会话配额) + if !s.checkAndRegisterSession(ctx, acc, sessionHash) { + continue // 会话限制已满,尝试下一个账号 + } return &AccountSelectionResult{ Account: acc, WaitPlan: &AccountWaitPlan{ @@ -898,7 +1011,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro return nil, errors.New("no available accounts") } -func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, groupID *int64, sessionHash string, preferOAuth bool, sessionUUID string) (*AccountSelectionResult, bool) { +func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, groupID *int64, sessionHash string, preferOAuth bool) (*AccountSelectionResult, bool) { ordered := append([]*Account(nil), candidates...) 
sortAccountsByPriorityAndLastUsed(ordered, preferOAuth) @@ -906,7 +1019,7 @@ func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) if err == nil && result.Acquired { // 会话数量限制检查 - if !s.checkAndRegisterSession(ctx, acc, sessionUUID) { + if !s.checkAndRegisterSession(ctx, acc, sessionHash) { result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 continue } @@ -1067,7 +1180,24 @@ func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64, gr func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { if s.schedulerSnapshot != nil { - return s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + accounts, useMixed, err := s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + if err == nil { + slog.Debug("account_scheduling_list_snapshot", + "group_id", derefGroupID(groupID), + "platform", platform, + "use_mixed", useMixed, + "count", len(accounts)) + for _, acc := range accounts { + slog.Debug("account_scheduling_account_detail", + "account_id", acc.ID, + "name", acc.Name, + "platform", acc.Platform, + "type", acc.Type, + "status", acc.Status, + "tls_fingerprint", acc.IsTLSFingerprintEnabled()) + } + } + return accounts, useMixed, err } useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform if useMixed { @@ -1080,6 +1210,10 @@ func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *i accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) } if err != nil { + slog.Debug("account_scheduling_list_failed", + "group_id", derefGroupID(groupID), + "platform", platform, + "error", err) return nil, useMixed, err } filtered := make([]Account, 0, len(accounts)) @@ -1089,6 +1223,20 @@ func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *i } filtered = append(filtered, acc) } + slog.Debug("account_scheduling_list_mixed", + "group_id", derefGroupID(groupID), + "platform", platform, + "raw_count", len(accounts), + "filtered_count", len(filtered)) + for _, acc := range filtered { + slog.Debug("account_scheduling_account_detail", + "account_id", acc.ID, + "name", acc.Name, + "platform", acc.Platform, + "type", acc.Type, + "status", acc.Status, + "tls_fingerprint", acc.IsTLSFingerprintEnabled()) + } return filtered, useMixed, nil } @@ -1103,8 +1251,25 @@ func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *i accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) } if err != nil { + slog.Debug("account_scheduling_list_failed", + "group_id", derefGroupID(groupID), + "platform", platform, + "error", err) return nil, useMixed, err } + slog.Debug("account_scheduling_list_single", + "group_id", derefGroupID(groupID), + "platform", platform, + "count", len(accounts)) + for _, acc := range accounts { + slog.Debug("account_scheduling_account_detail", + "account_id", acc.ID, + "name", acc.Name, + "platform", acc.Platform, + "type", acc.Type, + "status", acc.Status, + "tls_fingerprint", acc.IsTLSFingerprintEnabled()) + } return accounts, useMixed, nil } @@ -1170,12 +1335,8 @@ func (s *GatewayService) isAccountSchedulableForWindowCost(ctx context.Context, // 缓存未命中,从数据库查询 { - var startTime time.Time - if account.SessionWindowStart != nil { - startTime = *account.SessionWindowStart - } else { - startTime = time.Now().Add(-5 * 
time.Hour) - } + // 使用统一的窗口开始时间计算逻辑(考虑窗口过期情况) + startTime := account.GetCurrentWindowStartTime() stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, startTime) if err != nil { @@ -1208,15 +1369,16 @@ checkSchedulability: // checkAndRegisterSession 检查并注册会话,用于会话数量限制 // 仅适用于 Anthropic OAuth/SetupToken 账号 +// sessionID: 会话标识符(使用粘性会话的 hash) // 返回 true 表示允许(在限制内或会话已存在),false 表示拒绝(超出限制且是新会话) -func (s *GatewayService) checkAndRegisterSession(ctx context.Context, account *Account, sessionUUID string) bool { +func (s *GatewayService) checkAndRegisterSession(ctx context.Context, account *Account, sessionID string) bool { // 只检查 Anthropic OAuth/SetupToken 账号 if !account.IsAnthropicOAuthOrSetupToken() { return true } maxSessions := account.GetMaxSessions() - if maxSessions <= 0 || sessionUUID == "" { + if maxSessions <= 0 || sessionID == "" { return true // 未启用会话限制或无会话ID } @@ -1226,7 +1388,7 @@ func (s *GatewayService) checkAndRegisterSession(ctx context.Context, account *A idleTimeout := time.Duration(account.GetSessionIdleTimeoutMinutes()) * time.Minute - allowed, err := s.sessionLimitCache.RegisterSession(ctx, account.ID, sessionUUID, maxSessions, idleTimeout) + allowed, err := s.sessionLimitCache.RegisterSession(ctx, account.ID, sessionID, maxSessions, idleTimeout) if err != nil { // 失败开放:缓存错误时允许通过 return true @@ -1234,18 +1396,6 @@ func (s *GatewayService) checkAndRegisterSession(ctx context.Context, account *A return allowed } -// extractSessionUUID 从 metadata.user_id 中提取会话 UUID -// 格式: user_{64位hex}_account__session_{uuid} -func extractSessionUUID(metadataUserID string) string { - if metadataUserID == "" { - return "" - } - if match := sessionIDRegex.FindStringSubmatch(metadataUserID); len(match) > 1 { - return match[1] - } - return "" -} - func (s *GatewayService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) { if s.schedulerSnapshot != nil { return s.schedulerSnapshot.GetAccount(ctx, accountID) @@ -1348,14 +1498,20 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和平台匹配(确保粘性会话不会跨分组或跨平台) - if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { - log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + if err == nil { + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) } - if s.debugModelRoutingEnabled() { - log.Printf("[ModelRoutingDebug] legacy routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) + if !clearSticky && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy routed sticky hit: group_id=%v 
model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) + } + return account, nil } - return account, nil } } } @@ -1445,11 +1601,17 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和平台匹配(确保粘性会话不会跨分组或跨平台) - if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { - log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + if err == nil { + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + } + if !clearSticky && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + return account, nil } - return account, nil } } } @@ -1549,15 +1711,21 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g if _, excluded := excludedIDs[accountID]; !excluded { account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和有效性:原生平台直接匹配,antigravity 需要启用混合调度 - if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { - if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { - log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + if err == nil { + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + } + if !clearSticky && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy mixed routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) + } + return account, nil } - if s.debugModelRoutingEnabled() { - log.Printf("[ModelRoutingDebug] legacy mixed routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) - } - return account, nil } } } @@ -1648,12 +1816,18 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g if _, excluded 
:= excludedIDs[accountID]; !excluded { account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和有效性:原生平台直接匹配,antigravity 需要启用混合调度 - if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { - if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { - log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + if err == nil { + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + } + if !clearSticky && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + return account, nil } - return account, nil } } } @@ -1741,6 +1915,10 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo if account.Platform == PlatformAnthropic { requestedModel = normalizeClaudeModelForAnthropic(requestedModel) } + // Gemini API Key 账户直接透传,由上游判断模型是否支持 + if account.Platform == PlatformGemini && account.Type == AccountTypeAPIKey { + return true + } // 其他平台使用账户的模型支持检查 return account.IsModelSupported(requestedModel) } @@ -2173,6 +2351,10 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A proxyURL = account.Proxy.URL() } + // 调试日志:记录即将转发的账号信息 + log.Printf("[Forward] Using account: ID=%d Name=%s Platform=%s Type=%s TLSFingerprint=%v Proxy=%s", + account.ID, account.Name, account.Platform, account.Type, account.IsTLSFingerprintEnabled(), proxyURL) + // 重试循环 var resp *http.Response retryStart := time.Now() @@ -2187,7 +2369,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A } // 发送请求 - resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + resp, err = s.httpUpstream.DoWithTLS(upstreamReq, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) if err != nil { if resp != nil && resp.Body != nil { _ = resp.Body.Close() @@ -2261,7 +2443,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A filteredBody := FilterThinkingBlocksForRetry(body) retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) if buildErr == nil { - retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) + retryResp, retryErr := s.httpUpstream.DoWithTLS(retryReq, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) if retryErr == nil { if retryResp.StatusCode < 400 { log.Printf("Account %d: signature error retry succeeded (thinking downgraded)", account.ID) @@ -2293,7 +2475,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body) retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, 
reqModel) if buildErr2 == nil { - retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency) + retryResp2, retryErr2 := s.httpUpstream.DoWithTLS(retryReq2, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) if retryErr2 == nil { resp = retryResp2 break @@ -2408,6 +2590,10 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A _ = resp.Body.Close() resp.Body = io.NopCloser(bytes.NewReader(respBody)) + // 调试日志:打印重试耗尽后的错误响应 + log.Printf("[Forward] Upstream error (retry exhausted, failover): Account=%d(%s) Status=%d RequestID=%s Body=%s", + account.ID, account.Name, resp.StatusCode, resp.Header.Get("x-request-id"), truncateString(string(respBody), 1000)) + s.handleRetryExhaustedSideEffects(ctx, resp, account) appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, @@ -2435,6 +2621,10 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A _ = resp.Body.Close() resp.Body = io.NopCloser(bytes.NewReader(respBody)) + // 调试日志:打印上游错误响应 + log.Printf("[Forward] Upstream error (failover): Account=%d(%s) Status=%d RequestID=%s Body=%s", + account.ID, account.Name, resp.StatusCode, resp.Header.Get("x-request-id"), truncateString(string(respBody), 1000)) + s.handleFailoverSideEffects(ctx, resp, account) appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ Platform: account.Platform, @@ -2564,9 +2754,10 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex fingerprint = fp // 2. 重写metadata.user_id(需要指纹中的ClientID和账号的account_uuid) + // 如果启用了会话ID伪装,会在重写后替换 session 部分为固定值 accountUUID := account.GetExtraString("account_uuid") if accountUUID != "" && fp.ClientID != "" { - if newBody, err := s.identityService.RewriteUserID(body, account.ID, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 { + if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 { body = newBody } } @@ -2785,6 +2976,10 @@ func extractUpstreamErrorMessage(body []byte) string { func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + // 调试日志:打印上游错误响应 + log.Printf("[Forward] Upstream error (non-retryable): Account=%d(%s) Status=%d RequestID=%s Body=%s", + account.ID, account.Name, resp.StatusCode, resp.Header.Get("x-request-id"), truncateString(string(body), 1000)) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) @@ -3215,17 +3410,19 @@ func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { } `json:"usage"` } if json.Unmarshal([]byte(data), &msgDelta) == nil && msgDelta.Type == "message_delta" { - // output_tokens 总是从 message_delta 获取 - usage.OutputTokens = msgDelta.Usage.OutputTokens - - // 如果 message_start 中没有值,则从 message_delta 获取(兼容GLM等API) - if usage.InputTokens == 0 { + // message_delta 仅覆盖存在且非0的字段 + // 避免覆盖 message_start 中已有的值(如 input_tokens) + // Claude API 的 message_delta 通常只包含 output_tokens + if msgDelta.Usage.InputTokens > 0 { usage.InputTokens = msgDelta.Usage.InputTokens } - if usage.CacheCreationInputTokens == 0 { + if msgDelta.Usage.OutputTokens > 0 { + usage.OutputTokens = msgDelta.Usage.OutputTokens + } + if msgDelta.Usage.CacheCreationInputTokens > 0 { usage.CacheCreationInputTokens = msgDelta.Usage.CacheCreationInputTokens } - if 
usage.CacheReadInputTokens == 0 { + if msgDelta.Usage.CacheReadInputTokens > 0 { usage.CacheReadInputTokens = msgDelta.Usage.CacheReadInputTokens } } @@ -3505,7 +3702,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // 发送请求 - resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + resp, err := s.httpUpstream.DoWithTLS(upstreamReq, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) if err != nil { setOpsUpstreamError(c, 0, sanitizeUpstreamErrorMessage(err.Error()), "") s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Request failed") @@ -3527,7 +3724,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, filteredBody := FilterThinkingBlocksForRetry(body) retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) if buildErr == nil { - retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) + retryResp, retryErr := s.httpUpstream.DoWithTLS(retryReq, proxyURL, account.ID, account.Concurrency, account.IsTLSFingerprintEnabled()) if retryErr == nil { resp = retryResp respBody, err = io.ReadAll(resp.Body) @@ -3605,12 +3802,13 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // OAuth 账号:应用统一指纹和重写 userID + // 如果启用了会话ID伪装,会在重写后替换 session 部分为固定值 if account.IsOAuth() && s.identityService != nil { fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) if err == nil { accountUUID := account.GetExtraString("account_uuid") if accountUUID != "" && fp.ClientID != "" { - if newBody, err := s.identityService.RewriteUserID(body, account.ID, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 { + if newBody, err := s.identityService.RewriteUserIDWithMasking(ctx, body, account, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 { body = newBody } } diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 3833e66e..2e04c73c 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -82,70 +82,23 @@ func (s *GeminiMessagesCompatService) SelectAccountForModel(ctx context.Context, } func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) { - // 优先检查 context 中的强制平台(/antigravity 路由) - var platform string - forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) - if hasForcePlatform && forcePlatform != "" { - platform = forcePlatform - } else if groupID != nil { - // 根据分组 platform 决定查询哪种账号 - var group *Group - if ctxGroup, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(ctxGroup) && ctxGroup.ID == *groupID { - group = ctxGroup - } else { - var err error - group, err = s.groupRepo.GetByIDLite(ctx, *groupID) - if err != nil { - return nil, fmt.Errorf("get group failed: %w", err) - } - } - platform = group.Platform - } else { - // 无分组时只使用原生 gemini 平台 - platform = PlatformGemini + // 1. 
确定目标平台和调度模式 + // Determine target platform and scheduling mode + platform, useMixedScheduling, hasForcePlatform, err := s.resolvePlatformAndSchedulingMode(ctx, groupID) + if err != nil { + return nil, err } - // gemini 分组支持混合调度(包含启用了 mixed_scheduling 的 antigravity 账户) - // 注意:强制平台模式不走混合调度 - useMixedScheduling := platform == PlatformGemini && !hasForcePlatform - cacheKey := "gemini:" + sessionHash - if sessionHash != "" { - accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) - if err == nil && accountID > 0 { - if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.getSchedulableAccount(ctx, accountID) - // 检查账号是否有效:原生平台直接匹配,antigravity 需要启用混合调度 - if err == nil && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - valid := false - if account.Platform == platform { - valid = true - } else if useMixedScheduling && account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() { - valid = true - } - if valid { - usable := true - if s.rateLimitService != nil && requestedModel != "" { - ok, err := s.rateLimitService.PreCheckUsage(ctx, account, requestedModel) - if err != nil { - log.Printf("[Gemini PreCheck] Account %d precheck error: %v", account.ID, err) - } - if !ok { - usable = false - } - } - if usable { - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), cacheKey, geminiStickySessionTTL) - return account, nil - } - } - } - } - } + // 2. 尝试粘性会话命中 + // Try sticky session hit + if account := s.tryStickySessionHit(ctx, groupID, sessionHash, cacheKey, requestedModel, excludedIDs, platform, useMixedScheduling); account != nil { + return account, nil } - // 查询可调度账户(强制平台模式:优先按分组查找,找不到再查全部) + // 3. 查询可调度账户(强制平台模式:优先按分组查找,找不到再查全部) + // Query schedulable accounts (force platform mode: try group first, fallback to all) accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, platform, hasForcePlatform) if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) @@ -158,56 +111,9 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co } } - var selected *Account - for i := range accounts { - acc := &accounts[i] - if _, excluded := excludedIDs[acc.ID]; excluded { - continue - } - // 混合调度模式下:原生平台直接通过,antigravity 需要启用 mixed_scheduling - // 非混合调度模式(antigravity 分组):不需要过滤 - if useMixedScheduling && acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { - continue - } - if !acc.IsSchedulableForModel(requestedModel) { - continue - } - if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { - continue - } - if s.rateLimitService != nil && requestedModel != "" { - ok, err := s.rateLimitService.PreCheckUsage(ctx, acc, requestedModel) - if err != nil { - log.Printf("[Gemini PreCheck] Account %d precheck error: %v", acc.ID, err) - } - if !ok { - continue - } - } - if selected == nil { - selected = acc - continue - } - if acc.Priority < selected.Priority { - selected = acc - } else if acc.Priority == selected.Priority { - switch { - case acc.LastUsedAt == nil && selected.LastUsedAt != nil: - selected = acc - case acc.LastUsedAt != nil && selected.LastUsedAt == nil: - // keep selected (never used is preferred) - case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - // Prefer OAuth accounts when both are unused (more compatible for Code Assist flows). 
- if acc.Type == AccountTypeOAuth && selected.Type != AccountTypeOAuth { - selected = acc - } - default: - if acc.LastUsedAt.Before(*selected.LastUsedAt) { - selected = acc - } - } - } - } + // 4. 按优先级 + LRU 选择最佳账号 + // Select best account by priority + LRU + selected := s.selectBestGeminiAccount(ctx, accounts, requestedModel, excludedIDs, platform, useMixedScheduling) if selected == nil { if requestedModel != "" { @@ -216,6 +122,8 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co return nil, errors.New("no available Gemini accounts") } + // 5. 设置粘性会话绑定 + // Set sticky session binding if sessionHash != "" { _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), cacheKey, selected.ID, geminiStickySessionTTL) } @@ -223,6 +131,229 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co return selected, nil } +// resolvePlatformAndSchedulingMode 解析目标平台和调度模式。 +// 返回:平台名称、是否使用混合调度、是否强制平台、错误。 +// +// resolvePlatformAndSchedulingMode resolves target platform and scheduling mode. +// Returns: platform name, whether to use mixed scheduling, whether force platform, error. +func (s *GeminiMessagesCompatService) resolvePlatformAndSchedulingMode(ctx context.Context, groupID *int64) (platform string, useMixedScheduling bool, hasForcePlatform bool, err error) { + // 优先检查 context 中的强制平台(/antigravity 路由) + forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) + if hasForcePlatform && forcePlatform != "" { + return forcePlatform, false, true, nil + } + + if groupID != nil { + // 根据分组 platform 决定查询哪种账号 + var group *Group + if ctxGroup, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(ctxGroup) && ctxGroup.ID == *groupID { + group = ctxGroup + } else { + group, err = s.groupRepo.GetByIDLite(ctx, *groupID) + if err != nil { + return "", false, false, fmt.Errorf("get group failed: %w", err) + } + } + // gemini 分组支持混合调度(包含启用了 mixed_scheduling 的 antigravity 账户) + return group.Platform, group.Platform == PlatformGemini, false, nil + } + + // 无分组时只使用原生 gemini 平台 + return PlatformGemini, true, false, nil +} + +// tryStickySessionHit 尝试从粘性会话获取账号。 +// 如果命中且账号可用则返回账号;如果账号不可用则清理会话并返回 nil。 +// +// tryStickySessionHit attempts to get account from sticky session. +// Returns account if hit and usable; clears session and returns nil if account unavailable. 
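+//
+// Illustrative sketch (mirrors the caller in SelectAccountForModelWithExclusions;
+// the names are the caller's local variables):
+//
+//	cacheKey := "gemini:" + sessionHash
+//	if acc := s.tryStickySessionHit(ctx, groupID, sessionHash, cacheKey,
+//		requestedModel, excludedIDs, platform, useMixedScheduling); acc != nil {
+//		return acc, nil // sticky hit: reuse the previously bound account
+//	}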
+func (s *GeminiMessagesCompatService) tryStickySessionHit( + ctx context.Context, + groupID *int64, + sessionHash, cacheKey, requestedModel string, + excludedIDs map[int64]struct{}, + platform string, + useMixedScheduling bool, +) *Account { + if sessionHash == "" { + return nil + } + + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + if err != nil || accountID <= 0 { + return nil + } + + if _, excluded := excludedIDs[accountID]; excluded { + return nil + } + + account, err := s.getSchedulableAccount(ctx, accountID) + if err != nil { + return nil + } + + // 检查账号是否需要清理粘性会话 + // Check if sticky session should be cleared + if shouldClearStickySession(account) { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + return nil + } + + // 验证账号是否可用于当前请求 + // Verify account is usable for current request + if !s.isAccountUsableForRequest(ctx, account, requestedModel, platform, useMixedScheduling) { + return nil + } + + // 刷新会话 TTL 并返回账号 + // Refresh session TTL and return account + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), cacheKey, geminiStickySessionTTL) + return account +} + +// isAccountUsableForRequest 检查账号是否可用于当前请求。 +// 验证:模型调度、模型支持、平台匹配、速率限制预检。 +// +// isAccountUsableForRequest checks if account is usable for current request. +// Validates: model scheduling, model support, platform matching, rate limit precheck. +func (s *GeminiMessagesCompatService) isAccountUsableForRequest( + ctx context.Context, + account *Account, + requestedModel, platform string, + useMixedScheduling bool, +) bool { + // 检查模型调度能力 + // Check model scheduling capability + if !account.IsSchedulableForModel(requestedModel) { + return false + } + + // 检查模型支持 + // Check model support + if requestedModel != "" && !s.isModelSupportedByAccount(account, requestedModel) { + return false + } + + // 检查平台匹配 + // Check platform matching + if !s.isAccountValidForPlatform(account, platform, useMixedScheduling) { + return false + } + + // 速率限制预检 + // Rate limit precheck + if !s.passesRateLimitPreCheck(ctx, account, requestedModel) { + return false + } + + return true +} + +// isAccountValidForPlatform 检查账号是否匹配目标平台。 +// 原生平台直接匹配;混合调度模式下 antigravity 需要启用 mixed_scheduling。 +// +// isAccountValidForPlatform checks if account matches target platform. +// Native platform matches directly; mixed scheduling mode requires antigravity to enable mixed_scheduling. +func (s *GeminiMessagesCompatService) isAccountValidForPlatform(account *Account, platform string, useMixedScheduling bool) bool { + if account.Platform == platform { + return true + } + if useMixedScheduling && account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() { + return true + } + return false +} + +// passesRateLimitPreCheck 执行速率限制预检。 +// 返回 true 表示通过预检或无需预检。 +// +// passesRateLimitPreCheck performs rate limit precheck. +// Returns true if passed or precheck not required. +func (s *GeminiMessagesCompatService) passesRateLimitPreCheck(ctx context.Context, account *Account, requestedModel string) bool { + if s.rateLimitService == nil || requestedModel == "" { + return true + } + ok, err := s.rateLimitService.PreCheckUsage(ctx, account, requestedModel) + if err != nil { + log.Printf("[Gemini PreCheck] Account %d precheck error: %v", account.ID, err) + } + return ok +} + +// selectBestGeminiAccount 从候选账号中选择最佳账号(优先级 + LRU + OAuth 优先)。 +// 返回 nil 表示无可用账号。 +// +// selectBestGeminiAccount selects best account from candidates (priority + LRU + OAuth preferred). 
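+// Candidates that are excluded, not schedulable for the model, lacking model
+// support, platform-mismatched, or failing the rate-limit precheck are skipped.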
+// Returns nil if no available account. +func (s *GeminiMessagesCompatService) selectBestGeminiAccount( + ctx context.Context, + accounts []Account, + requestedModel string, + excludedIDs map[int64]struct{}, + platform string, + useMixedScheduling bool, +) *Account { + var selected *Account + + for i := range accounts { + acc := &accounts[i] + + // 跳过被排除的账号 + if _, excluded := excludedIDs[acc.ID]; excluded { + continue + } + + // 检查账号是否可用于当前请求 + if !s.isAccountUsableForRequest(ctx, acc, requestedModel, platform, useMixedScheduling) { + continue + } + + // 选择最佳账号 + if selected == nil { + selected = acc + continue + } + + if s.isBetterGeminiAccount(acc, selected) { + selected = acc + } + } + + return selected +} + +// isBetterGeminiAccount 判断 candidate 是否比 current 更优。 +// 规则:优先级更高(数值更小)优先;同优先级时,未使用过的优先(OAuth > 非 OAuth),其次是最久未使用的。 +// +// isBetterGeminiAccount checks if candidate is better than current. +// Rules: higher priority (lower value) wins; same priority: never used (OAuth > non-OAuth) > least recently used. +func (s *GeminiMessagesCompatService) isBetterGeminiAccount(candidate, current *Account) bool { + // 优先级更高(数值更小) + if candidate.Priority < current.Priority { + return true + } + if candidate.Priority > current.Priority { + return false + } + + // 同优先级,比较最后使用时间 + switch { + case candidate.LastUsedAt == nil && current.LastUsedAt != nil: + // candidate 从未使用,优先 + return true + case candidate.LastUsedAt != nil && current.LastUsedAt == nil: + // current 从未使用,保持 + return false + case candidate.LastUsedAt == nil && current.LastUsedAt == nil: + // 都未使用,优先选择 OAuth 账号(更兼容 Code Assist 流程) + return candidate.Type == AccountTypeOAuth && current.Type != AccountTypeOAuth + default: + // 都使用过,选择最久未使用的 + return candidate.LastUsedAt.Before(*current.LastUsedAt) + } +} + // isModelSupportedByAccount 根据账户平台检查模型支持 func (s *GeminiMessagesCompatService) isModelSupportedByAccount(account *Account, requestedModel string) bool { if account.Platform == PlatformAntigravity { @@ -1864,6 +1995,7 @@ func collectGeminiSSE(body io.Reader, isOAuth bool) (map[string]any, *ClaudeUsag var last map[string]any var lastWithParts map[string]any + var collectedTextParts []string // Collect all text parts for aggregation usage := &ClaudeUsage{} for { @@ -1875,7 +2007,7 @@ func collectGeminiSSE(body io.Reader, isOAuth bool) (map[string]any, *ClaudeUsag switch payload { case "", "[DONE]": if payload == "[DONE]" { - return pickGeminiCollectResult(last, lastWithParts), usage, nil + return mergeCollectedTextParts(pickGeminiCollectResult(last, lastWithParts), collectedTextParts), usage, nil } default: var parsed map[string]any @@ -1894,6 +2026,12 @@ func collectGeminiSSE(body io.Reader, isOAuth bool) (map[string]any, *ClaudeUsag } if parts := extractGeminiParts(parsed); len(parts) > 0 { lastWithParts = parsed + // Collect text from each part for aggregation + for _, part := range parts { + if text, ok := part["text"].(string); ok && text != "" { + collectedTextParts = append(collectedTextParts, text) + } + } } } } @@ -1908,7 +2046,7 @@ func collectGeminiSSE(body io.Reader, isOAuth bool) (map[string]any, *ClaudeUsag } } - return pickGeminiCollectResult(last, lastWithParts), usage, nil + return mergeCollectedTextParts(pickGeminiCollectResult(last, lastWithParts), collectedTextParts), usage, nil } func pickGeminiCollectResult(last map[string]any, lastWithParts map[string]any) map[string]any { @@ -1921,6 +2059,83 @@ func pickGeminiCollectResult(last map[string]any, lastWithParts map[string]any) return map[string]any{} } +// 
mergeCollectedTextParts merges all collected text chunks into the final response.
+// This fixes the issue where non-streaming responses only returned the last chunk
+// instead of the complete aggregated text.
+func mergeCollectedTextParts(response map[string]any, textParts []string) map[string]any {
+	if len(textParts) == 0 {
+		return response
+	}
+
+	// Join all text parts
+	mergedText := strings.Join(textParts, "")
+
+	// Shallow-copy the top level of the response (nested maps are shared and
+	// mutated in place below)
+	result := make(map[string]any)
+	for k, v := range response {
+		result[k] = v
+	}
+
+	// Get or create candidates
+	candidates, ok := result["candidates"].([]any)
+	if !ok || len(candidates) == 0 {
+		candidates = []any{map[string]any{}}
+	}
+
+	// Get first candidate
+	candidate, ok := candidates[0].(map[string]any)
+	if !ok {
+		candidate = make(map[string]any)
+		candidates[0] = candidate
+	}
+
+	// Get or create content
+	content, ok := candidate["content"].(map[string]any)
+	if !ok {
+		content = map[string]any{"role": "model"}
+		candidate["content"] = content
+	}
+
+	// Get existing parts
+	existingParts, ok := content["parts"].([]any)
+	if !ok {
+		existingParts = []any{}
+	}
+
+	// Find and update first text part, or create new one
+	newParts := make([]any, 0, len(existingParts)+1)
+	textUpdated := false
+
+	for _, p := range existingParts {
+		pm, ok := p.(map[string]any)
+		if !ok {
+			newParts = append(newParts, p)
+			continue
+		}
+		if _, hasText := pm["text"]; hasText && !textUpdated {
+			// Replace with merged text
+			newPart := make(map[string]any)
+			for k, v := range pm {
+				newPart[k] = v
+			}
+			newPart["text"] = mergedText
+			newParts = append(newParts, newPart)
+			textUpdated = true
+		} else {
+			newParts = append(newParts, pm)
+		}
+	}
+
+	if !textUpdated {
+		newParts = append([]any{map[string]any{"text": mergedText}}, newParts...)
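+		// Note: no existing text part was found above, so the merged text is
+		// prepended as a new leading part; any non-text parts are preserved.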
+ } + + content["parts"] = newParts + result["candidates"] = candidates + + return result +} + type geminiNativeStreamResult struct { usage *ClaudeUsage firstTokenMs *int @@ -2312,9 +2527,13 @@ func extractGeminiUsage(geminiResp map[string]any) *ClaudeUsage { } prompt, _ := asInt(usageMeta["promptTokenCount"]) cand, _ := asInt(usageMeta["candidatesTokenCount"]) + cached, _ := asInt(usageMeta["cachedContentTokenCount"]) + // 注意:Gemini 的 promptTokenCount 包含 cachedContentTokenCount, + // 但 Claude 的 input_tokens 不包含 cache_read_input_tokens,需要减去 return &ClaudeUsage{ - InputTokens: prompt, - OutputTokens: cand, + InputTokens: prompt - cached, + OutputTokens: cand, + CacheReadInputTokens: cached, } } diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index 20640b01..c63a020c 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -15,8 +15,10 @@ import ( // mockAccountRepoForGemini Gemini 测试用的 mock type mockAccountRepoForGemini struct { - accounts []Account - accountsByID map[int64]*Account + accounts []Account + accountsByID map[int64]*Account + listByGroupFunc func(ctx context.Context, groupID int64, platforms []string) ([]Account, error) + listByPlatformFunc func(ctx context.Context, platforms []string) ([]Account, error) } func (m *mockAccountRepoForGemini) GetByID(ctx context.Context, id int64) (*Account, error) { @@ -81,9 +83,6 @@ func (m *mockAccountRepoForGemini) ListActive(ctx context.Context) ([]Account, e func (m *mockAccountRepoForGemini) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { return nil, nil } -func (m *mockAccountRepoForGemini) ListByPlatformAndCredentialEmails(ctx context.Context, platform string, emails []string) ([]Account, error) { - return nil, nil -} func (m *mockAccountRepoForGemini) UpdateLastUsed(ctx context.Context, id int64) error { return nil } func (m *mockAccountRepoForGemini) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { return nil @@ -110,6 +109,9 @@ func (m *mockAccountRepoForGemini) ListSchedulableByGroupID(ctx context.Context, return nil, nil } func (m *mockAccountRepoForGemini) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]Account, error) { + if m.listByPlatformFunc != nil { + return m.listByPlatformFunc(ctx, platforms) + } var result []Account platformSet := make(map[string]bool) for _, p := range platforms { @@ -123,6 +125,9 @@ func (m *mockAccountRepoForGemini) ListSchedulableByPlatforms(ctx context.Contex return result, nil } func (m *mockAccountRepoForGemini) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) { + if m.listByGroupFunc != nil { + return m.listByGroupFunc(ctx, groupID, platforms) + } return m.ListSchedulableByPlatforms(ctx, platforms) } func (m *mockAccountRepoForGemini) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { @@ -218,6 +223,7 @@ var _ GroupRepository = (*mockGroupRepoForGemini)(nil) // mockGatewayCacheForGemini Gemini 测试用的 cache mock type mockGatewayCacheForGemini struct { sessionBindings map[string]int64 + deletedSessions map[string]int } func (m *mockGatewayCacheForGemini) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) { @@ -239,6 +245,18 @@ func (m *mockGatewayCacheForGemini) RefreshSessionTTL(ctx context.Context, group return nil } +func (m *mockGatewayCacheForGemini) 
DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error { + if m.sessionBindings == nil { + return nil + } + if m.deletedSessions == nil { + m.deletedSessions = make(map[string]int) + } + m.deletedSessions[sessionHash]++ + delete(m.sessionBindings, sessionHash) + return nil +} + // TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform 测试 Gemini 单平台选择 func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform(t *testing.T) { ctx := context.Background() @@ -529,6 +547,274 @@ func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_StickyS // 粘性会话未命中,按优先级选择 require.Equal(t, int64(2), acc.ID, "粘性会话未命中,应按优先级选择") }) + + t.Run("粘性会话不可调度-清理并回退选择", func(t *testing.T) { + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 2, Status: StatusDisabled, Schedulable: true}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{ + sessionBindings: map[string]int64{"gemini:session-123": 1}, + } + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "session-123", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, 1, cache.deletedSessions["gemini:session-123"]) + require.Equal(t, int64(2), cache.sessionBindings["gemini:session-123"]) + }) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_ForcePlatformFallback(t *testing.T) { + ctx := context.Background() + groupID := int64(9) + ctx = context.WithValue(ctx, ctxkey.ForcePlatform, PlatformAntigravity) + + repo := &mockAccountRepoForGemini{ + listByGroupFunc: func(ctx context.Context, groupID int64, platforms []string) ([]Account, error) { + return nil, nil + }, + listByPlatformFunc: func(ctx context.Context, platforms []string) ([]Account, error) { + return []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, + }, nil + }, + accountsByID: map[int64]*Account{ + 1: {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_NoModelSupport(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformGemini, + Priority: 1, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-1.0-pro": "gemini-1.0-pro"}}, + }, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + 
groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "supporting model") +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_StickyMixedScheduling(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + {ID: 2, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{ + sessionBindings: map[string]int64{"gemini:session-999": 1}, + } + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "session-999", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_SkipDisabledMixedScheduling(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_ExcludedAccount(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + excluded := map[int64]struct{}{1: {}} + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", excluded) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_ListError(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForGemini{ + listByPlatformFunc: func(ctx 
context.Context, platforms []string) ([]Account, error) { + return nil, errors.New("query failed") + }, + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "query accounts failed") +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_PreferOAuth(t *testing.T) { + ctx := context.Background() + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeAPIKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-pro", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) +} + +func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_PreferLeastRecentlyUsed(t *testing.T) { + ctx := context.Background() + oldTime := time.Now().Add(-2 * time.Hour) + newTime := time.Now().Add(-1 * time.Hour) + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: &newTime}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: &oldTime}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-pro", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) } // TestGeminiPlatformRouting_DocumentRouteDecision 测试平台路由决策逻辑 @@ -605,7 +891,7 @@ func TestGeminiMessagesCompatService_isModelSupportedByAccount(t *testing.T) { name: "Gemini平台-有映射配置-只支持配置的模型", account: &Account{ Platform: PlatformGemini, - Credentials: map[string]any{"model_mapping": map[string]any{"gemini-1.5-pro": "x"}}, + Credentials: map[string]any{"model_mapping": map[string]any{"gemini-2.5-pro": "x"}}, }, model: "gemini-2.5-flash", expected: false, diff --git a/backend/internal/service/gemini_native_signature_cleaner.go b/backend/internal/service/gemini_native_signature_cleaner.go new file mode 100644 index 00000000..b3352fb0 --- /dev/null +++ b/backend/internal/service/gemini_native_signature_cleaner.go @@ -0,0 +1,72 @@ +package service + +import ( + "encoding/json" +) + +// CleanGeminiNativeThoughtSignatures 从 Gemini 原生 API 请求中移除 thoughtSignature 字段, +// 以避免跨账号签名验证错误。 +// +// 当粘性会话切换账号时(例如原账号异常、不可调度等),旧账号返回的 thoughtSignature +// 会导致新账号的签名验证失败。通过移除这些签名,让新账号重新生成有效的签名。 +// 
CleanGeminiNativeThoughtSignatures removes thoughtSignature fields from Gemini native API requests
+// to avoid cross-account signature validation errors.
+//
+// When a sticky session switches accounts (e.g., the original account becomes unavailable),
+// thoughtSignatures from the old account will cause validation failures on the new account.
+// By removing these signatures, we allow the new account to generate valid signatures.
+func CleanGeminiNativeThoughtSignatures(body []byte) []byte {
+	if len(body) == 0 {
+		return body
+	}
+
+	// Parse the JSON body
+	var data any
+	if err := json.Unmarshal(body, &data); err != nil {
+		// If parsing fails, return the original body (it may not be JSON, or may be malformed)
+		return body
+	}
+
+	// Recursively strip thoughtSignature fields
+	cleaned := cleanThoughtSignaturesRecursive(data)
+
+	// Re-serialize
+	result, err := json.Marshal(cleaned)
+	if err != nil {
+		// If serialization fails, return the original body
+		return body
+	}
+
+	return result
+}
+
+// cleanThoughtSignaturesRecursive walks the data structure recursively and removes all thoughtSignature fields
+func cleanThoughtSignaturesRecursive(data any) any {
+	switch v := data.(type) {
+	case map[string]any:
+		// Build a new map without thoughtSignature
+		result := make(map[string]any, len(v))
+		for key, value := range v {
+			// Skip the thoughtSignature field
+			if key == "thoughtSignature" {
+				continue
+			}
+			// Recurse into nested structures
+			result[key] = cleanThoughtSignaturesRecursive(value)
+		}
+		return result
+
+	case []any:
+		// Recurse into each array element
+		result := make([]any, len(v))
+		for i, item := range v {
+			result[i] = cleanThoughtSignaturesRecursive(item)
+		}
+		return result
+
+	default:
+		// Primitive types (string, number, bool, null) are returned as-is
+		return v
+	}
+}
diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go
index f13ae169..313b048f 100644
--- a/backend/internal/service/gemini_token_provider.go
+++ b/backend/internal/service/gemini_token_provider.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"log"
+	"log/slog"
 	"strconv"
 	"strings"
 	"time"
@@ -131,21 +132,32 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou
 		}
 	}

-	// 3) Populate cache with TTL.
+ // 3) Populate cache with TTL(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) if p.tokenCache != nil { - ttl := 30 * time.Minute - if expiresAt != nil { - until := time.Until(*expiresAt) - switch { - case until > geminiTokenCacheSkew: - ttl = until - geminiTokenCacheSkew - case until > 0: - ttl = until - default: - ttl = time.Minute + latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) + if isStale && latestAccount != nil { + // 版本过时,使用 DB 中的最新 token + slog.Debug("gemini_token_version_stale_use_latest", "account_id", account.ID) + accessToken = latestAccount.GetCredential("access_token") + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found after version check") } + // 不写入缓存,让下次请求重新处理 + } else { + ttl := 30 * time.Minute + if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > geminiTokenCacheSkew: + ttl = until - geminiTokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) } - _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) } return accessToken, nil diff --git a/backend/internal/service/http_upstream_port.go b/backend/internal/service/http_upstream_port.go index 9357f763..0e4cfbec 100644 --- a/backend/internal/service/http_upstream_port.go +++ b/backend/internal/service/http_upstream_port.go @@ -10,6 +10,7 @@ import "net/http" // - 支持可选代理配置 // - 支持账户级连接池隔离 // - 实现类负责连接池管理和复用 +// - 支持可选的 TLS 指纹伪装 type HTTPUpstream interface { // Do 执行 HTTP 请求 // @@ -27,4 +28,28 @@ type HTTPUpstream interface { // - 调用方必须关闭 resp.Body,否则会导致连接泄漏 // - 响应体可能已被包装以跟踪请求生命周期 Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) + + // DoWithTLS 执行带 TLS 指纹伪装的 HTTP 请求 + // + // 参数: + // - req: HTTP 请求对象,由调用方构建 + // - proxyURL: 代理服务器地址,空字符串表示直连 + // - accountID: 账户 ID,用于连接池隔离和 TLS 指纹模板选择 + // - accountConcurrency: 账户并发限制,用于动态调整连接池大小 + // - enableTLSFingerprint: 是否启用 TLS 指纹伪装 + // + // 返回: + // - *http.Response: HTTP 响应,调用方必须关闭 Body + // - error: 请求错误(网络错误、超时等) + // + // TLS 指纹说明: + // - 当 enableTLSFingerprint=true 时,使用 utls 库模拟 Claude CLI 的 TLS 指纹 + // - TLS 指纹模板根据 accountID % len(profiles) 自动选择 + // - 支持直连、HTTP/HTTPS 代理、SOCKS5 代理三种场景 + // - 如果 enableTLSFingerprint=false,行为与 Do 方法相同 + // + // 注意: + // - 调用方必须关闭 resp.Body,否则会导致连接泄漏 + // - TLS 指纹客户端与普通客户端使用不同的缓存键,互不影响 + DoWithTLS(req *http.Request, proxyURL string, accountID int64, accountConcurrency int, enableTLSFingerprint bool) (*http.Response, error) } diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go index 1ffa8057..e2e723b0 100644 --- a/backend/internal/service/identity_service.go +++ b/backend/internal/service/identity_service.go @@ -8,9 +8,11 @@ import ( "encoding/json" "fmt" "log" + "log/slog" "net/http" "regexp" "strconv" + "strings" "time" ) @@ -49,6 +51,13 @@ type Fingerprint struct { type IdentityCache interface { GetFingerprint(ctx context.Context, accountID int64) (*Fingerprint, error) SetFingerprint(ctx context.Context, accountID int64, fp *Fingerprint) error + // GetMaskedSessionID 获取固定的会话ID(用于会话ID伪装功能) + // 返回的 sessionID 是一个 UUID 格式的字符串 + // 如果不存在或已过期(15分钟无请求),返回空字符串 + GetMaskedSessionID(ctx context.Context, accountID int64) (string, error) + // SetMaskedSessionID 设置固定的会话ID,TTL 为 15 分钟 + // 每次调用都会刷新 TTL + SetMaskedSessionID(ctx context.Context, accountID int64, sessionID string) error } // IdentityService 管理OAuth账号的请求身份指纹 @@ -203,6 +212,94 @@ func (s *IdentityService) 
RewriteUserID(body []byte, accountID int64, accountUUI return json.Marshal(reqMap) } +// RewriteUserIDWithMasking 重写body中的metadata.user_id,支持会话ID伪装 +// 如果账号启用了会话ID伪装(session_id_masking_enabled), +// 则在完成常规重写后,将 session 部分替换为固定的伪装ID(15分钟内保持不变) +func (s *IdentityService) RewriteUserIDWithMasking(ctx context.Context, body []byte, account *Account, accountUUID, cachedClientID string) ([]byte, error) { + // 先执行常规的 RewriteUserID 逻辑 + newBody, err := s.RewriteUserID(body, account.ID, accountUUID, cachedClientID) + if err != nil { + return newBody, err + } + + // 检查是否启用会话ID伪装 + if !account.IsSessionIDMaskingEnabled() { + return newBody, nil + } + + // 解析重写后的 body,提取 user_id + var reqMap map[string]any + if err := json.Unmarshal(newBody, &reqMap); err != nil { + return newBody, nil + } + + metadata, ok := reqMap["metadata"].(map[string]any) + if !ok { + return newBody, nil + } + + userID, ok := metadata["user_id"].(string) + if !ok || userID == "" { + return newBody, nil + } + + // 查找 _session_ 的位置,替换其后的内容 + const sessionMarker = "_session_" + idx := strings.LastIndex(userID, sessionMarker) + if idx == -1 { + return newBody, nil + } + + // 获取或生成固定的伪装 session ID + maskedSessionID, err := s.cache.GetMaskedSessionID(ctx, account.ID) + if err != nil { + log.Printf("Warning: failed to get masked session ID for account %d: %v", account.ID, err) + return newBody, nil + } + + if maskedSessionID == "" { + // 首次或已过期,生成新的伪装 session ID + maskedSessionID = generateRandomUUID() + log.Printf("Generated new masked session ID for account %d: %s", account.ID, maskedSessionID) + } + + // 刷新 TTL(每次请求都刷新,保持 15 分钟有效期) + if err := s.cache.SetMaskedSessionID(ctx, account.ID, maskedSessionID); err != nil { + log.Printf("Warning: failed to set masked session ID for account %d: %v", account.ID, err) + } + + // 替换 session 部分:保留 _session_ 之前的内容,替换之后的内容 + newUserID := userID[:idx+len(sessionMarker)] + maskedSessionID + + slog.Debug("session_id_masking_applied", + "account_id", account.ID, + "before", userID, + "after", newUserID, + ) + + metadata["user_id"] = newUserID + reqMap["metadata"] = metadata + + return json.Marshal(reqMap) +} + +// generateRandomUUID 生成随机 UUID v4 格式字符串 +func generateRandomUUID() string { + b := make([]byte, 16) + if _, err := rand.Read(b); err != nil { + // fallback: 使用时间戳生成 + h := sha256.Sum256([]byte(fmt.Sprintf("%d", time.Now().UnixNano()))) + b = h[:16] + } + + // 设置 UUID v4 版本和变体位 + b[6] = (b[6] & 0x0f) | 0x40 + b[8] = (b[8] & 0x3f) | 0x80 + + return fmt.Sprintf("%x-%x-%x-%x-%x", + b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]) +} + // generateClientID 生成64位十六进制客户端ID(32字节随机数) func generateClientID() string { b := make([]byte, 32) diff --git a/backend/internal/service/oauth_service.go b/backend/internal/service/oauth_service.go index 0039cb44..15543080 100644 --- a/backend/internal/service/oauth_service.go +++ b/backend/internal/service/oauth_service.go @@ -48,8 +48,7 @@ type GenerateAuthURLResult struct { // GenerateAuthURL generates an OAuth authorization URL with full scope func (s *OAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64) (*GenerateAuthURLResult, error) { - scope := fmt.Sprintf("%s %s", oauth.ScopeProfile, oauth.ScopeInference) - return s.generateAuthURLWithScope(ctx, scope, proxyID) + return s.generateAuthURLWithScope(ctx, oauth.ScopeOAuth, proxyID) } // GenerateSetupTokenURL generates an OAuth authorization URL for setup token (inference only) @@ -123,6 +122,7 @@ type TokenInfo struct { Scope string `json:"scope,omitempty"` OrgUUID string `json:"org_uuid,omitempty"` 
AccountUUID string `json:"account_uuid,omitempty"` + EmailAddress string `json:"email_address,omitempty"` } // ExchangeCode exchanges authorization code for tokens @@ -176,7 +176,8 @@ func (s *OAuthService) CookieAuth(ctx context.Context, input *CookieAuthInput) ( } // Determine scope and if this is a setup token - scope := fmt.Sprintf("%s %s", oauth.ScopeProfile, oauth.ScopeInference) + // Internal API call uses ScopeAPI (org:create_api_key not supported) + scope := oauth.ScopeAPI isSetupToken := false if input.Scope == "inference" { scope = oauth.ScopeInference @@ -252,9 +253,15 @@ func (s *OAuthService) exchangeCodeForToken(ctx context.Context, code, codeVerif tokenInfo.OrgUUID = tokenResp.Organization.UUID log.Printf("[OAuth] Got org_uuid: %s", tokenInfo.OrgUUID) } - if tokenResp.Account != nil && tokenResp.Account.UUID != "" { - tokenInfo.AccountUUID = tokenResp.Account.UUID - log.Printf("[OAuth] Got account_uuid: %s", tokenInfo.AccountUUID) + if tokenResp.Account != nil { + if tokenResp.Account.UUID != "" { + tokenInfo.AccountUUID = tokenResp.Account.UUID + log.Printf("[OAuth] Got account_uuid: %s", tokenInfo.AccountUUID) + } + if tokenResp.Account.EmailAddress != "" { + tokenInfo.EmailAddress = tokenResp.Account.EmailAddress + log.Printf("[OAuth] Got email_address: %s", tokenInfo.EmailAddress) + } } return tokenInfo, nil diff --git a/backend/internal/service/openai_codex_transform.go b/backend/internal/service/openai_codex_transform.go index 264bdf95..48c72593 100644 --- a/backend/internal/service/openai_codex_transform.go +++ b/backend/internal/service/openai_codex_transform.go @@ -394,19 +394,35 @@ func normalizeCodexTools(reqBody map[string]any) bool { } modified := false - for idx, tool := range tools { + validTools := make([]any, 0, len(tools)) + + for _, tool := range tools { toolMap, ok := tool.(map[string]any) if !ok { + // Keep unknown structure as-is to avoid breaking upstream behavior. + validTools = append(validTools, tool) continue } toolType, _ := toolMap["type"].(string) - if strings.TrimSpace(toolType) != "function" { + toolType = strings.TrimSpace(toolType) + if toolType != "function" { + validTools = append(validTools, toolMap) continue } - function, ok := toolMap["function"].(map[string]any) - if !ok { + // OpenAI Responses-style tools use top-level name/parameters. + if name, ok := toolMap["name"].(string); ok && strings.TrimSpace(name) != "" { + validTools = append(validTools, toolMap) + continue + } + + // ChatCompletions-style tools use {type:"function", function:{...}}. + functionValue, hasFunction := toolMap["function"] + function, ok := functionValue.(map[string]any) + if !hasFunction || functionValue == nil || !ok || function == nil { + // Drop invalid function tools. 
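+			// (e.g. {"type":"function","function":null}); flagging the request as
+			// modified ensures the filtered validTools slice is written back below.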
+			modified = true
+			continue
 		}

@@ -435,11 +451,11 @@ func normalizeCodexTools(reqBody map[string]any) bool {
 			}
 		}

-		tools[idx] = toolMap
+		validTools = append(validTools, toolMap)
 	}

 	if modified {
-		reqBody["tools"] = tools
+		reqBody["tools"] = validTools
 	}

 	return modified
diff --git a/backend/internal/service/openai_codex_transform_test.go b/backend/internal/service/openai_codex_transform_test.go
index 0ff9485a..4cd72ab6 100644
--- a/backend/internal/service/openai_codex_transform_test.go
+++ b/backend/internal/service/openai_codex_transform_test.go
@@ -129,6 +129,37 @@ func TestFilterCodexInput_RemovesItemReferenceWhenNotPreserved(t *testing.T) {
 	require.False(t, hasID)
 }

+func TestApplyCodexOAuthTransform_NormalizeCodexTools_PreservesResponsesFunctionTools(t *testing.T) {
+	setupCodexCache(t)
+
+	reqBody := map[string]any{
+		"model": "gpt-5.1",
+		"tools": []any{
+			map[string]any{
+				"type":        "function",
+				"name":        "bash",
+				"description": "desc",
+				"parameters":  map[string]any{"type": "object"},
+			},
+			map[string]any{
+				"type":     "function",
+				"function": nil,
+			},
+		},
+	}
+
+	applyCodexOAuthTransform(reqBody)
+
+	tools, ok := reqBody["tools"].([]any)
+	require.True(t, ok)
+	require.Len(t, tools, 1)
+
+	first, ok := tools[0].(map[string]any)
+	require.True(t, ok)
+	require.Equal(t, "function", first["type"])
+	require.Equal(t, "bash", first["name"])
+}
+
 func TestApplyCodexOAuthTransform_EmptyInput(t *testing.T) {
 	// Empty input should remain empty and not trigger errors.
 	setupCodexCache(t)
diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go
index c7d94882..289a13af 100644
--- a/backend/internal/service/openai_gateway_service.go
+++ b/backend/internal/service/openai_gateway_service.go
@@ -60,6 +60,92 @@ type OpenAICodexUsageSnapshot struct {
 	UpdatedAt string `json:"updated_at,omitempty"`
 }

+// NormalizedCodexLimits contains normalized 5h/7d rate limit data
+type NormalizedCodexLimits struct {
+	Used5hPercent   *float64
+	Reset5hSeconds  *int
+	Window5hMinutes *int
+	Used7dPercent   *float64
+	Reset7dSeconds  *int
+	Window7dMinutes *int
+}
+
+// Normalize converts primary/secondary fields to canonical 5h/7d fields.
+// Strategy: compare window_minutes to determine which window is 5h and which is 7d.
+// Returns nil only when the receiver is nil; when no window data is available,
+// the legacy assumption (primary=7d, secondary=5h) is applied and fields
+// without data are simply left unset.
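+//
+// Illustrative sketch (hypothetical window sizes; 300 min = 5h, 10080 min = 7d):
+//
+//	mins5h, mins7d := 300, 10080
+//	snap := &OpenAICodexUsageSnapshot{
+//		PrimaryWindowMinutes:   &mins7d, // larger window maps to the 7d fields
+//		SecondaryWindowMinutes: &mins5h, // smaller window maps to the 5h fields
+//	}
+//	limits := snap.Normalize()
+//	// *limits.Window7dMinutes == 10080 && *limits.Window5hMinutes == 300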
+func (s *OpenAICodexUsageSnapshot) Normalize() *NormalizedCodexLimits { + if s == nil { + return nil + } + + result := &NormalizedCodexLimits{} + + primaryMins := 0 + secondaryMins := 0 + hasPrimaryWindow := false + hasSecondaryWindow := false + + if s.PrimaryWindowMinutes != nil { + primaryMins = *s.PrimaryWindowMinutes + hasPrimaryWindow = true + } + if s.SecondaryWindowMinutes != nil { + secondaryMins = *s.SecondaryWindowMinutes + hasSecondaryWindow = true + } + + // Determine mapping based on window_minutes + use5hFromPrimary := false + use7dFromPrimary := false + + if hasPrimaryWindow && hasSecondaryWindow { + // Both known: smaller window is 5h, larger is 7d + if primaryMins < secondaryMins { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasPrimaryWindow { + // Only primary known: classify by threshold (<=360 min = 6h -> 5h window) + if primaryMins <= 360 { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasSecondaryWindow { + // Only secondary known: classify by threshold + if secondaryMins <= 360 { + // 5h from secondary, so primary (if any data) is 7d + use7dFromPrimary = true + } else { + // 7d from secondary, so primary (if any data) is 5h + use5hFromPrimary = true + } + } else { + // No window_minutes: fall back to legacy assumption (primary=7d, secondary=5h) + use7dFromPrimary = true + } + + // Assign values + if use5hFromPrimary { + result.Used5hPercent = s.PrimaryUsedPercent + result.Reset5hSeconds = s.PrimaryResetAfterSeconds + result.Window5hMinutes = s.PrimaryWindowMinutes + result.Used7dPercent = s.SecondaryUsedPercent + result.Reset7dSeconds = s.SecondaryResetAfterSeconds + result.Window7dMinutes = s.SecondaryWindowMinutes + } else if use7dFromPrimary { + result.Used7dPercent = s.PrimaryUsedPercent + result.Reset7dSeconds = s.PrimaryResetAfterSeconds + result.Window7dMinutes = s.PrimaryWindowMinutes + result.Used5hPercent = s.SecondaryUsedPercent + result.Reset5hSeconds = s.SecondaryResetAfterSeconds + result.Window5hMinutes = s.SecondaryWindowMinutes + } + + return result +} + // OpenAIUsage represents OpenAI API response usage type OpenAIUsage struct { InputTokens int `json:"input_tokens"` @@ -133,12 +219,30 @@ func NewOpenAIGatewayService( } } -// GenerateSessionHash generates session hash from header (OpenAI uses session_id header) -func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string { - sessionID := c.GetHeader("session_id") +// GenerateSessionHash generates a sticky-session hash for OpenAI requests. +// +// Priority: +// 1. Header: session_id +// 2. Header: conversation_id +// 3. Body: prompt_cache_key (opencode) +func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context, reqBody map[string]any) string { + if c == nil { + return "" + } + + sessionID := strings.TrimSpace(c.GetHeader("session_id")) + if sessionID == "" { + sessionID = strings.TrimSpace(c.GetHeader("conversation_id")) + } + if sessionID == "" && reqBody != nil { + if v, ok := reqBody["prompt_cache_key"].(string); ok { + sessionID = strings.TrimSpace(v) + } + } if sessionID == "" { return "" } + hash := sha256.Sum256([]byte(sessionID)) return hex.EncodeToString(hash[:]) } @@ -162,67 +266,26 @@ func (s *OpenAIGatewayService) SelectAccountForModel(ctx context.Context, groupI } // SelectAccountForModelWithExclusions selects an account supporting the requested model while excluding specified accounts. 
+// SelectAccountForModelWithExclusions 选择支持指定模型的账号,同时排除指定的账号。 func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) { - // 1. Check sticky session - if sessionHash != "" { - accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) - if err == nil && accountID > 0 { - if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.getSchedulableAccount(ctx, accountID) - if err == nil && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) { - // Refresh sticky session TTL - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL) - return account, nil - } - } - } + cacheKey := "openai:" + sessionHash + + // 1. 尝试粘性会话命中 + // Try sticky session hit + if account := s.tryStickySessionHit(ctx, groupID, sessionHash, cacheKey, requestedModel, excludedIDs); account != nil { + return account, nil } - // 2. Get schedulable OpenAI accounts + // 2. 获取可调度的 OpenAI 账号 + // Get schedulable OpenAI accounts accounts, err := s.listSchedulableAccounts(ctx, groupID) if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) } - // 3. Select by priority + LRU - var selected *Account - for i := range accounts { - acc := &accounts[i] - if _, excluded := excludedIDs[acc.ID]; excluded { - continue - } - // Scheduler snapshots can be temporarily stale; re-check schedulability here to - // avoid selecting accounts that were recently rate-limited/overloaded. - if !acc.IsSchedulable() { - continue - } - // Check model support - if requestedModel != "" && !acc.IsModelSupported(requestedModel) { - continue - } - if selected == nil { - selected = acc - continue - } - // Lower priority value means higher priority - if acc.Priority < selected.Priority { - selected = acc - } else if acc.Priority == selected.Priority { - switch { - case acc.LastUsedAt == nil && selected.LastUsedAt != nil: - selected = acc - case acc.LastUsedAt != nil && selected.LastUsedAt == nil: - // keep selected (never used is preferred) - case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - // keep selected (both never used) - default: - // Same priority, select least recently used - if acc.LastUsedAt.Before(*selected.LastUsedAt) { - selected = acc - } - } - } - } + // 3. 按优先级 + LRU 选择最佳账号 + // Select by priority + LRU + selected := s.selectBestAccount(accounts, requestedModel, excludedIDs) if selected == nil { if requestedModel != "" { @@ -231,14 +294,138 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C return nil, errors.New("no available OpenAI accounts") } - // 4. Set sticky session + // 4. 设置粘性会话绑定 + // Set sticky session binding if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, selected.ID, openaiStickySessionTTL) + _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), cacheKey, selected.ID, openaiStickySessionTTL) } return selected, nil } +// tryStickySessionHit 尝试从粘性会话获取账号。 +// 如果命中且账号可用则返回账号;如果账号不可用则清理会话并返回 nil。 +// +// tryStickySessionHit attempts to get account from sticky session. +// Returns account if hit and usable; clears session and returns nil if account is unavailable. 
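+//
+// Illustrative sketch (mirrors the caller in SelectAccountForModelWithExclusions):
+//
+//	cacheKey := "openai:" + sessionHash
+//	if account := s.tryStickySessionHit(ctx, groupID, sessionHash, cacheKey, requestedModel, excludedIDs); account != nil {
+//		return account, nil
+//	}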
+func (s *OpenAIGatewayService) tryStickySessionHit(ctx context.Context, groupID *int64, sessionHash, cacheKey, requestedModel string, excludedIDs map[int64]struct{}) *Account { + if sessionHash == "" { + return nil + } + + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + if err != nil || accountID <= 0 { + return nil + } + + if _, excluded := excludedIDs[accountID]; excluded { + return nil + } + + account, err := s.getSchedulableAccount(ctx, accountID) + if err != nil { + return nil + } + + // 检查账号是否需要清理粘性会话 + // Check if sticky session should be cleared + if shouldClearStickySession(account) { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + return nil + } + + // 验证账号是否可用于当前请求 + // Verify account is usable for current request + if !account.IsSchedulable() || !account.IsOpenAI() { + return nil + } + if requestedModel != "" && !account.IsModelSupported(requestedModel) { + return nil + } + + // 刷新会话 TTL 并返回账号 + // Refresh session TTL and return account + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), cacheKey, openaiStickySessionTTL) + return account +} + +// selectBestAccount 从候选账号中选择最佳账号(优先级 + LRU)。 +// 返回 nil 表示无可用账号。 +// +// selectBestAccount selects the best account from candidates (priority + LRU). +// Returns nil if no available account. +func (s *OpenAIGatewayService) selectBestAccount(accounts []Account, requestedModel string, excludedIDs map[int64]struct{}) *Account { + var selected *Account + + for i := range accounts { + acc := &accounts[i] + + // 跳过被排除的账号 + // Skip excluded accounts + if _, excluded := excludedIDs[acc.ID]; excluded { + continue + } + + // 调度器快照可能暂时过时,这里重新检查可调度性和平台 + // Scheduler snapshots can be temporarily stale; re-check schedulability and platform + if !acc.IsSchedulable() || !acc.IsOpenAI() { + continue + } + + // 检查模型支持 + // Check model support + if requestedModel != "" && !acc.IsModelSupported(requestedModel) { + continue + } + + // 选择优先级最高且最久未使用的账号 + // Select highest priority and least recently used + if selected == nil { + selected = acc + continue + } + + if s.isBetterAccount(acc, selected) { + selected = acc + } + } + + return selected +} + +// isBetterAccount 判断 candidate 是否比 current 更优。 +// 规则:优先级更高(数值更小)优先;同优先级时,未使用过的优先,其次是最久未使用的。 +// +// isBetterAccount checks if candidate is better than current. +// Rules: higher priority (lower value) wins; same priority: never used > least recently used. +func (s *OpenAIGatewayService) isBetterAccount(candidate, current *Account) bool { + // 优先级更高(数值更小) + // Higher priority (lower value) + if candidate.Priority < current.Priority { + return true + } + if candidate.Priority > current.Priority { + return false + } + + // 同优先级,比较最后使用时间 + // Same priority, compare last used time + switch { + case candidate.LastUsedAt == nil && current.LastUsedAt != nil: + // candidate 从未使用,优先 + return true + case candidate.LastUsedAt != nil && current.LastUsedAt == nil: + // current 从未使用,保持 + return false + case candidate.LastUsedAt == nil && current.LastUsedAt == nil: + // 都未使用,保持 + return false + default: + // 都使用过,选择最久未使用的 + return candidate.LastUsedAt.Before(*current.LastUsedAt) + } +} + // SelectAccountWithLoadAwareness selects an account with load-awareness and wait plan. 
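+//
+// A minimal caller sketch (hypothetical handler code, not part of this patch):
+//
+//	sel, err := svc.SelectAccountWithLoadAwareness(ctx, groupID, sessionHash, model, nil)
+//	if err != nil {
+//		return err // no schedulable account
+//	}
+//	if sel.Acquired {
+//		defer sel.ReleaseFunc()
+//	} else if sel.WaitPlan != nil {
+//		// wait up to sel.WaitPlan.Timeout for a slot on sel.WaitPlan.AccountID
+//	}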
func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { cfg := s.schedulingConfig() @@ -307,29 +494,35 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) if err == nil && accountID > 0 && !isExcluded(accountID) { account, err := s.getSchedulableAccount(ctx, accountID) - if err == nil && account.IsSchedulable() && account.IsOpenAI() && - (requestedModel == "" || account.IsModelSupported(requestedModel)) { - result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) - if err == nil && result.Acquired { - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL) - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil + if err == nil { + clearSticky := shouldClearStickySession(account) + if clearSticky { + _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) } + if !clearSticky && account.IsSchedulable() && account.IsOpenAI() && + (requestedModel == "" || account.IsModelSupported(requestedModel)) { + result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if err == nil && result.Acquired { + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: accountID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } } } } @@ -760,7 +953,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // Extract and save Codex usage snapshot from response headers (for OAuth accounts) if account.Type == AccountTypeOAuth { - if snapshot := extractCodexUsageHeaders(resp.Header); snapshot != nil { + if snapshot := ParseCodexRateLimitHeaders(resp.Header); snapshot != nil { s.updateCodexUsageSnapshot(ctx, account.ID, snapshot) } } @@ -1558,8 +1751,9 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec return nil } -// extractCodexUsageHeaders extracts Codex usage limits from response headers -func extractCodexUsageHeaders(headers http.Header) *OpenAICodexUsageSnapshot { +// ParseCodexRateLimitHeaders extracts Codex usage limits from response headers. +// Exported for use in ratelimit_service when handling OpenAI 429 responses. 
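+//
+// Typical use mirrors the Forward handler for OAuth accounts (see above):
+//
+//	if snapshot := ParseCodexRateLimitHeaders(resp.Header); snapshot != nil {
+//		s.updateCodexUsageSnapshot(ctx, account.ID, snapshot)
+//	}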
+func ParseCodexRateLimitHeaders(headers http.Header) *OpenAICodexUsageSnapshot { snapshot := &OpenAICodexUsageSnapshot{} hasData := false @@ -1633,6 +1827,8 @@ func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, acc // Convert snapshot to map for merging into Extra updates := make(map[string]any) + + // Save raw primary/secondary fields for debugging/tracing if snapshot.PrimaryUsedPercent != nil { updates["codex_primary_used_percent"] = *snapshot.PrimaryUsedPercent } @@ -1656,109 +1852,25 @@ func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, acc } updates["codex_usage_updated_at"] = snapshot.UpdatedAt - // Normalize to canonical 5h/7d fields based on window_minutes - // This fixes the issue where OpenAI's primary/secondary naming is reversed - // Strategy: Compare the two windows and assign the smaller one to 5h, larger one to 7d - - // IMPORTANT: We can only reliably determine window type from window_minutes field - // The reset_after_seconds is remaining time, not window size, so it cannot be used for comparison - - var primaryWindowMins, secondaryWindowMins int - var hasPrimaryWindow, hasSecondaryWindow bool - - // Only use window_minutes for reliable window size comparison - if snapshot.PrimaryWindowMinutes != nil { - primaryWindowMins = *snapshot.PrimaryWindowMinutes - hasPrimaryWindow = true - } - - if snapshot.SecondaryWindowMinutes != nil { - secondaryWindowMins = *snapshot.SecondaryWindowMinutes - hasSecondaryWindow = true - } - - // Determine which is 5h and which is 7d - var use5hFromPrimary, use7dFromPrimary bool - var use5hFromSecondary, use7dFromSecondary bool - - if hasPrimaryWindow && hasSecondaryWindow { - // Both window sizes known: compare and assign smaller to 5h, larger to 7d - if primaryWindowMins < secondaryWindowMins { - use5hFromPrimary = true - use7dFromSecondary = true - } else { - use5hFromSecondary = true - use7dFromPrimary = true + // Normalize to canonical 5h/7d fields + if normalized := snapshot.Normalize(); normalized != nil { + if normalized.Used5hPercent != nil { + updates["codex_5h_used_percent"] = *normalized.Used5hPercent } - } else if hasPrimaryWindow { - // Only primary window size known: classify by absolute threshold - if primaryWindowMins <= 360 { - use5hFromPrimary = true - } else { - use7dFromPrimary = true + if normalized.Reset5hSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *normalized.Reset5hSeconds } - } else if hasSecondaryWindow { - // Only secondary window size known: classify by absolute threshold - if secondaryWindowMins <= 360 { - use5hFromSecondary = true - } else { - use7dFromSecondary = true + if normalized.Window5hMinutes != nil { + updates["codex_5h_window_minutes"] = *normalized.Window5hMinutes } - } else { - // No window_minutes available: cannot reliably determine window types - // Fall back to legacy assumption (may be incorrect) - // Assume primary=7d, secondary=5h based on historical observation - if snapshot.SecondaryUsedPercent != nil || snapshot.SecondaryResetAfterSeconds != nil || snapshot.SecondaryWindowMinutes != nil { - use5hFromSecondary = true + if normalized.Used7dPercent != nil { + updates["codex_7d_used_percent"] = *normalized.Used7dPercent } - if snapshot.PrimaryUsedPercent != nil || snapshot.PrimaryResetAfterSeconds != nil || snapshot.PrimaryWindowMinutes != nil { - use7dFromPrimary = true + if normalized.Reset7dSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *normalized.Reset7dSeconds } - } - - // Write canonical 5h fields - if 
use5hFromPrimary { - if snapshot.PrimaryUsedPercent != nil { - updates["codex_5h_used_percent"] = *snapshot.PrimaryUsedPercent - } - if snapshot.PrimaryResetAfterSeconds != nil { - updates["codex_5h_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds - } - if snapshot.PrimaryWindowMinutes != nil { - updates["codex_5h_window_minutes"] = *snapshot.PrimaryWindowMinutes - } - } else if use5hFromSecondary { - if snapshot.SecondaryUsedPercent != nil { - updates["codex_5h_used_percent"] = *snapshot.SecondaryUsedPercent - } - if snapshot.SecondaryResetAfterSeconds != nil { - updates["codex_5h_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds - } - if snapshot.SecondaryWindowMinutes != nil { - updates["codex_5h_window_minutes"] = *snapshot.SecondaryWindowMinutes - } - } - - // Write canonical 7d fields - if use7dFromPrimary { - if snapshot.PrimaryUsedPercent != nil { - updates["codex_7d_used_percent"] = *snapshot.PrimaryUsedPercent - } - if snapshot.PrimaryResetAfterSeconds != nil { - updates["codex_7d_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds - } - if snapshot.PrimaryWindowMinutes != nil { - updates["codex_7d_window_minutes"] = *snapshot.PrimaryWindowMinutes - } - } else if use7dFromSecondary { - if snapshot.SecondaryUsedPercent != nil { - updates["codex_7d_used_percent"] = *snapshot.SecondaryUsedPercent - } - if snapshot.SecondaryResetAfterSeconds != nil { - updates["codex_7d_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds - } - if snapshot.SecondaryWindowMinutes != nil { - updates["codex_7d_window_minutes"] = *snapshot.SecondaryWindowMinutes + if normalized.Window7dMinutes != nil { + updates["codex_7d_window_minutes"] = *normalized.Window7dMinutes } } diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 42b88b7d..1912e244 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -21,19 +21,50 @@ type stubOpenAIAccountRepo struct { accounts []Account } +func (r stubOpenAIAccountRepo) GetByID(ctx context.Context, id int64) (*Account, error) { + for i := range r.accounts { + if r.accounts[i].ID == id { + return &r.accounts[i], nil + } + } + return nil, errors.New("account not found") +} + func (r stubOpenAIAccountRepo) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) { - return append([]Account(nil), r.accounts...), nil + var result []Account + for _, acc := range r.accounts { + if acc.Platform == platform { + result = append(result, acc) + } + } + return result, nil } func (r stubOpenAIAccountRepo) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) { - return append([]Account(nil), r.accounts...), nil + var result []Account + for _, acc := range r.accounts { + if acc.Platform == platform { + result = append(result, acc) + } + } + return result, nil } type stubConcurrencyCache struct { ConcurrencyCache + loadBatchErr error + loadMap map[int64]*AccountLoadInfo + acquireResults map[int64]bool + waitCounts map[int64]int + skipDefaultLoad bool } func (c stubConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { + if c.acquireResults != nil { + if result, ok := c.acquireResults[accountID]; ok { + return result, nil + } + } return true, nil } @@ -42,13 +73,118 @@ func (c stubConcurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID 
} func (c stubConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + if c.loadBatchErr != nil { + return nil, c.loadBatchErr + } out := make(map[int64]*AccountLoadInfo, len(accounts)) + if c.skipDefaultLoad && c.loadMap != nil { + for _, acc := range accounts { + if load, ok := c.loadMap[acc.ID]; ok { + out[acc.ID] = load + } + } + return out, nil + } for _, acc := range accounts { + if c.loadMap != nil { + if load, ok := c.loadMap[acc.ID]; ok { + out[acc.ID] = load + continue + } + } out[acc.ID] = &AccountLoadInfo{AccountID: acc.ID, LoadRate: 0} } return out, nil } +func TestOpenAIGatewayService_GenerateSessionHash_Priority(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + + svc := &OpenAIGatewayService{} + + // 1) session_id header wins + c.Request.Header.Set("session_id", "sess-123") + c.Request.Header.Set("conversation_id", "conv-456") + h1 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h1 == "" { + t.Fatalf("expected non-empty hash") + } + + // 2) conversation_id used when session_id absent + c.Request.Header.Del("session_id") + h2 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h2 == "" { + t.Fatalf("expected non-empty hash") + } + if h1 == h2 { + t.Fatalf("expected different hashes for different keys") + } + + // 3) prompt_cache_key used when both headers absent + c.Request.Header.Del("conversation_id") + h3 := svc.GenerateSessionHash(c, map[string]any{"prompt_cache_key": "ses_aaa"}) + if h3 == "" { + t.Fatalf("expected non-empty hash") + } + if h2 == h3 { + t.Fatalf("expected different hashes for different keys") + } + + // 4) empty when no signals + h4 := svc.GenerateSessionHash(c, map[string]any{}) + if h4 != "" { + t.Fatalf("expected empty hash when no signals") + } +} + +func (c stubConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if c.waitCounts != nil { + if count, ok := c.waitCounts[accountID]; ok { + return count, nil + } + } + return 0, nil +} + +type stubGatewayCache struct { + sessionBindings map[string]int64 + deletedSessions map[string]int +} + +func (c *stubGatewayCache) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) { + if id, ok := c.sessionBindings[sessionHash]; ok { + return id, nil + } + return 0, errors.New("not found") +} + +func (c *stubGatewayCache) SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error { + if c.sessionBindings == nil { + c.sessionBindings = make(map[string]int64) + } + c.sessionBindings[sessionHash] = accountID + return nil +} + +func (c *stubGatewayCache) RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error { + return nil +} + +func (c *stubGatewayCache) DeleteSessionAccountID(ctx context.Context, groupID int64, sessionHash string) error { + if c.sessionBindings == nil { + return nil + } + if c.deletedSessions == nil { + c.deletedSessions = make(map[string]int) + } + c.deletedSessions[sessionHash]++ + delete(c.sessionBindings, sessionHash) + return nil +} + func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulable(t *testing.T) { now := time.Now() resetAt := now.Add(10 * time.Minute) @@ -139,6 +275,515 @@ func 
TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulableWhenNoConcurre } } +func TestOpenAISelectAccountForModelWithExclusions_StickyUnschedulableClearsSession(t *testing.T) { + sessionHash := "session-1" + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusDisabled, Schedulable: true, Concurrency: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1}, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{"openai:" + sessionHash: 1}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, sessionHash, "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountForModelWithExclusions error: %v", err) + } + if acc == nil || acc.ID != 2 { + t.Fatalf("expected account 2, got %+v", acc) + } + if cache.deletedSessions["openai:"+sessionHash] != 1 { + t.Fatalf("expected sticky session to be deleted") + } + if cache.sessionBindings["openai:"+sessionHash] != 2 { + t.Fatalf("expected sticky session to bind to account 2") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_StickyUnschedulableClearsSession(t *testing.T) { + sessionHash := "session-2" + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusDisabled, Schedulable: true, Concurrency: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1}, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{"openai:" + sessionHash: 1}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, sessionHash, "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil || selection.Account.ID != 2 { + t.Fatalf("expected account 2, got %+v", selection) + } + if cache.deletedSessions["openai:"+sessionHash] != 1 { + t.Fatalf("expected sticky session to be deleted") + } + if cache.sessionBindings["openai:"+sessionHash] != 2 { + t.Fatalf("expected sticky session to bind to account 2") + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAISelectAccountForModelWithExclusions_NoModelSupport(t *testing.T) { + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + { + ID: 1, + Platform: PlatformOpenAI, + Status: StatusActive, + Schedulable: true, + Credentials: map[string]any{"model_mapping": map[string]any{"gpt-3.5-turbo": "gpt-3.5-turbo"}}, + }, + }, + } + cache := &stubGatewayCache{} + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, "", "gpt-4", nil) + if err == nil { + t.Fatalf("expected error for unsupported model") + } + if acc != nil { + t.Fatalf("expected nil account for unsupported model") + } + if !strings.Contains(err.Error(), "supporting model") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestOpenAISelectAccountWithLoadAwareness_LoadBatchErrorFallback(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 2}, + {ID: 2, Platform: PlatformOpenAI, 
Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadBatchErr: errors.New("load batch failed"), + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "fallback", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil { + t.Fatalf("expected selection") + } + if selection.Account.ID != 2 { + t.Fatalf("expected account 2, got %d", selection.Account.ID) + } + if cache.sessionBindings["openai:fallback"] != 2 { + t.Fatalf("expected sticky session updated") + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAISelectAccountWithLoadAwareness_NoSlotFallbackWait(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + acquireResults: map[int64]bool{1: false}, + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 10}, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.WaitPlan == nil { + t.Fatalf("expected wait plan fallback") + } + if selection.Account == nil || selection.Account.ID != 1 { + t.Fatalf("expected account 1") + } +} + +func TestOpenAISelectAccountForModelWithExclusions_SetsStickyBinding(t *testing.T) { + sessionHash := "bind" + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, sessionHash, "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountForModelWithExclusions error: %v", err) + } + if acc == nil || acc.ID != 1 { + t.Fatalf("expected account 1") + } + if cache.sessionBindings["openai:"+sessionHash] != 1 { + t.Fatalf("expected sticky session binding") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_StickyWaitPlan(t *testing.T) { + sessionHash := "sticky-wait" + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{"openai:" + sessionHash: 1}, + } + concurrencyCache := stubConcurrencyCache{ + acquireResults: map[int64]bool{1: false}, + waitCounts: map[int64]int{1: 0}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, sessionHash, "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if 
selection == nil || selection.WaitPlan == nil { + t.Fatalf("expected sticky wait plan") + } + if selection.Account == nil || selection.Account.ID != 1 { + t.Fatalf("expected account 1") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_PrefersLowerLoad(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 80}, + 2: {AccountID: 2, LoadRate: 10}, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "load", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil || selection.Account.ID != 2 { + t.Fatalf("expected account 2") + } + if cache.sessionBindings["openai:load"] != 2 { + t.Fatalf("expected sticky session updated") + } +} + +func TestOpenAISelectAccountForModelWithExclusions_StickyExcludedFallback(t *testing.T) { + sessionHash := "excluded" + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 2}, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{"openai:" + sessionHash: 1}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + excluded := map[int64]struct{}{1: {}} + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, sessionHash, "gpt-4", excluded) + if err != nil { + t.Fatalf("SelectAccountForModelWithExclusions error: %v", err) + } + if acc == nil || acc.ID != 2 { + t.Fatalf("expected account 2") + } +} + +func TestOpenAISelectAccountForModelWithExclusions_StickyNonOpenAI(t *testing.T) { + sessionHash := "non-openai" + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 2}, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{"openai:" + sessionHash: 1}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, sessionHash, "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountForModelWithExclusions error: %v", err) + } + if acc == nil || acc.ID != 2 { + t.Fatalf("expected account 2") + } +} + +func TestOpenAISelectAccountForModelWithExclusions_NoAccounts(t *testing.T) { + repo := stubOpenAIAccountRepo{accounts: []Account{}} + cache := &stubGatewayCache{} + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, "", "", nil) + if err == nil { + t.Fatalf("expected error for no accounts") + } + if acc != nil { + t.Fatalf("expected nil account") + } + if !strings.Contains(err.Error(), "no 
available OpenAI accounts") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestOpenAISelectAccountWithLoadAwareness_NoCandidates(t *testing.T) { + groupID := int64(1) + resetAt := time.Now().Add(1 * time.Hour) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1, RateLimitResetAt: &resetAt}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{} + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err == nil { + t.Fatalf("expected error for no candidates") + } + if selection != nil { + t.Fatalf("expected nil selection") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_AllFullWaitPlan(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 100}, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.WaitPlan == nil { + t.Fatalf("expected wait plan") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_LoadBatchErrorNoAcquire(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadBatchErr: errors.New("load batch failed"), + acquireResults: map[int64]bool{1: false}, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.WaitPlan == nil { + t.Fatalf("expected wait plan") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_MissingLoadInfo(t *testing.T) { + groupID := int64(1) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 50}, + }, + skipDefaultLoad: true, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil || 
selection.Account.ID != 2 { + t.Fatalf("expected account 2") + } +} + +func TestOpenAISelectAccountForModelWithExclusions_LeastRecentlyUsed(t *testing.T) { + oldTime := time.Now().Add(-2 * time.Hour) + newTime := time.Now().Add(-1 * time.Hour) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Priority: 1, LastUsedAt: &newTime}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Priority: 1, LastUsedAt: &oldTime}, + }, + } + cache := &stubGatewayCache{} + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(context.Background(), nil, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountForModelWithExclusions error: %v", err) + } + if acc == nil || acc.ID != 2 { + t.Fatalf("expected account 2") + } +} + +func TestOpenAISelectAccountWithLoadAwareness_PreferNeverUsed(t *testing.T) { + groupID := int64(1) + lastUsed := time.Now().Add(-1 * time.Hour) + repo := stubOpenAIAccountRepo{ + accounts: []Account{ + {ID: 1, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1, LastUsedAt: &lastUsed}, + {ID: 2, Platform: PlatformOpenAI, Status: StatusActive, Schedulable: true, Concurrency: 1, Priority: 1}, + }, + } + cache := &stubGatewayCache{} + concurrencyCache := stubConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 1: {AccountID: 1, LoadRate: 10}, + 2: {AccountID: 2, LoadRate: 10}, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: repo, + cache: cache, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-4", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil || selection.Account.ID != 2 { + t.Fatalf("expected account 2") + } +} + func TestOpenAIStreamingTimeout(t *testing.T) { gin.SetMode(gin.TestMode) cfg := &config.Config{ diff --git a/backend/internal/service/openai_oauth_service.go b/backend/internal/service/openai_oauth_service.go index 182e08fe..ca7470b9 100644 --- a/backend/internal/service/openai_oauth_service.go +++ b/backend/internal/service/openai_oauth_service.go @@ -2,9 +2,10 @@ package service import ( "context" - "fmt" + "net/http" "time" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" ) @@ -35,12 +36,12 @@ func (s *OpenAIOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64 // Generate PKCE values state, err := openai.GenerateState() if err != nil { - return nil, fmt.Errorf("failed to generate state: %w", err) + return nil, infraerrors.Newf(http.StatusInternalServerError, "OPENAI_OAUTH_STATE_FAILED", "failed to generate state: %v", err) } codeVerifier, err := openai.GenerateCodeVerifier() if err != nil { - return nil, fmt.Errorf("failed to generate code verifier: %w", err) + return nil, infraerrors.Newf(http.StatusInternalServerError, "OPENAI_OAUTH_VERIFIER_FAILED", "failed to generate code verifier: %v", err) } codeChallenge := openai.GenerateCodeChallenge(codeVerifier) @@ -48,14 +49,17 @@ func (s *OpenAIOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64 // Generate session ID sessionID, err := openai.GenerateSessionID() if err != nil { - return nil, fmt.Errorf("failed to generate session ID: %w", err) + return nil, 
infraerrors.Newf(http.StatusInternalServerError, "OPENAI_OAUTH_SESSION_FAILED", "failed to generate session ID: %v", err) } // Get proxy URL if specified var proxyURL string if proxyID != nil { proxy, err := s.proxyRepo.GetByID(ctx, *proxyID) - if err == nil && proxy != nil { + if err != nil { + return nil, infraerrors.Newf(http.StatusBadRequest, "OPENAI_OAUTH_PROXY_NOT_FOUND", "proxy not found: %v", err) + } + if proxy != nil { proxyURL = proxy.URL() } } @@ -110,14 +114,17 @@ func (s *OpenAIOAuthService) ExchangeCode(ctx context.Context, input *OpenAIExch // Get session session, ok := s.sessionStore.Get(input.SessionID) if !ok { - return nil, fmt.Errorf("session not found or expired") + return nil, infraerrors.New(http.StatusBadRequest, "OPENAI_OAUTH_SESSION_NOT_FOUND", "session not found or expired") } - // Get proxy URL + // Get proxy URL: prefer input.ProxyID, fallback to session.ProxyURL proxyURL := session.ProxyURL if input.ProxyID != nil { proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID) - if err == nil && proxy != nil { + if err != nil { + return nil, infraerrors.Newf(http.StatusBadRequest, "OPENAI_OAUTH_PROXY_NOT_FOUND", "proxy not found: %v", err) + } + if proxy != nil { proxyURL = proxy.URL() } } @@ -131,7 +138,7 @@ func (s *OpenAIOAuthService) ExchangeCode(ctx context.Context, input *OpenAIExch // Exchange code for token tokenResp, err := s.oauthClient.ExchangeCode(ctx, input.Code, session.CodeVerifier, redirectURI, proxyURL) if err != nil { - return nil, fmt.Errorf("failed to exchange code: %w", err) + return nil, err } // Parse ID token to get user info @@ -201,12 +208,12 @@ func (s *OpenAIOAuthService) RefreshToken(ctx context.Context, refreshToken stri // RefreshAccountToken refreshes token for an OpenAI account func (s *OpenAIOAuthService) RefreshAccountToken(ctx context.Context, account *Account) (*OpenAITokenInfo, error) { if !account.IsOpenAI() { - return nil, fmt.Errorf("account is not an OpenAI account") + return nil, infraerrors.New(http.StatusBadRequest, "OPENAI_OAUTH_INVALID_ACCOUNT", "account is not an OpenAI account") } refreshToken := account.GetOpenAIRefreshToken() if refreshToken == "" { - return nil, fmt.Errorf("no refresh token available") + return nil, infraerrors.New(http.StatusBadRequest, "OPENAI_OAUTH_NO_REFRESH_TOKEN", "no refresh token available") } var proxyURL string diff --git a/backend/internal/service/openai_token_provider.go b/backend/internal/service/openai_token_provider.go index 82a0866f..87a7713b 100644 --- a/backend/internal/service/openai_token_provider.go +++ b/backend/internal/service/openai_token_provider.go @@ -162,26 +162,37 @@ func (p *OpenAITokenProvider) GetAccessToken(ctx context.Context, account *Accou return "", errors.New("access_token not found in credentials") } - // 3. 存入缓存 + // 3. 
存入缓存(验证版本后再写入,避免异步刷新任务与请求线程的竞态条件) if p.tokenCache != nil { - ttl := 30 * time.Minute - if refreshFailed { - // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 - ttl = time.Minute - slog.Debug("openai_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") - } else if expiresAt != nil { - until := time.Until(*expiresAt) - switch { - case until > openAITokenCacheSkew: - ttl = until - openAITokenCacheSkew - case until > 0: - ttl = until - default: - ttl = time.Minute + latestAccount, isStale := CheckTokenVersion(ctx, account, p.accountRepo) + if isStale && latestAccount != nil { + // 版本过时,使用 DB 中的最新 token + slog.Debug("openai_token_version_stale_use_latest", "account_id", account.ID) + accessToken = latestAccount.GetOpenAIAccessToken() + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found after version check") + } + // 不写入缓存,让下次请求重新处理 + } else { + ttl := 30 * time.Minute + if refreshFailed { + // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 + ttl = time.Minute + slog.Debug("openai_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") + } else if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > openAITokenCacheSkew: + ttl = until - openAITokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { + slog.Warn("openai_token_cache_set_failed", "account_id", account.ID, "error", err) } - } - if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { - slog.Warn("openai_token_cache_set_failed", "account_id", account.ID, "error", err) } } diff --git a/backend/internal/service/openai_tool_corrector.go b/backend/internal/service/openai_tool_corrector.go index 9c9eab84..f4719275 100644 --- a/backend/internal/service/openai_tool_corrector.go +++ b/backend/internal/service/openai_tool_corrector.go @@ -27,6 +27,11 @@ var codexToolNameMapping = map[string]string{ "executeBash": "bash", "exec_bash": "bash", "execBash": "bash", + + // Some clients output generic fetch names. 
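+	// All of the variants below normalize to OpenCode's "webfetch".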
+ "fetch": "webfetch", + "web_fetch": "webfetch", + "webFetch": "webfetch", } // ToolCorrectionStats 记录工具修正的统计信息(导出用于 JSON 序列化) @@ -208,27 +213,67 @@ func (c *CodexToolCorrector) correctToolParameters(toolName string, functionCall // 根据工具名称应用特定的参数修正规则 switch toolName { case "bash": - // 移除 workdir 参数(OpenCode 不支持) - if _, exists := argsMap["workdir"]; exists { - delete(argsMap, "workdir") - corrected = true - log.Printf("[CodexToolCorrector] Removed 'workdir' parameter from bash tool") - } - if _, exists := argsMap["work_dir"]; exists { - delete(argsMap, "work_dir") - corrected = true - log.Printf("[CodexToolCorrector] Removed 'work_dir' parameter from bash tool") + // OpenCode bash 支持 workdir;有些来源会输出 work_dir。 + if _, hasWorkdir := argsMap["workdir"]; !hasWorkdir { + if workDir, exists := argsMap["work_dir"]; exists { + argsMap["workdir"] = workDir + delete(argsMap, "work_dir") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'work_dir' to 'workdir' in bash tool") + } + } else { + if _, exists := argsMap["work_dir"]; exists { + delete(argsMap, "work_dir") + corrected = true + log.Printf("[CodexToolCorrector] Removed duplicate 'work_dir' parameter from bash tool") + } } case "edit": - // OpenCode edit 使用 old_string/new_string,Codex 可能使用其他名称 - // 这里可以添加参数名称的映射逻辑 - if _, exists := argsMap["file_path"]; !exists { - if path, exists := argsMap["path"]; exists { - argsMap["file_path"] = path + // OpenCode edit 参数为 filePath/oldString/newString(camelCase)。 + if _, exists := argsMap["filePath"]; !exists { + if filePath, exists := argsMap["file_path"]; exists { + argsMap["filePath"] = filePath + delete(argsMap, "file_path") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'file_path' to 'filePath' in edit tool") + } else if filePath, exists := argsMap["path"]; exists { + argsMap["filePath"] = filePath delete(argsMap, "path") corrected = true - log.Printf("[CodexToolCorrector] Renamed 'path' to 'file_path' in edit tool") + log.Printf("[CodexToolCorrector] Renamed 'path' to 'filePath' in edit tool") + } else if filePath, exists := argsMap["file"]; exists { + argsMap["filePath"] = filePath + delete(argsMap, "file") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'file' to 'filePath' in edit tool") + } + } + + if _, exists := argsMap["oldString"]; !exists { + if oldString, exists := argsMap["old_string"]; exists { + argsMap["oldString"] = oldString + delete(argsMap, "old_string") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'old_string' to 'oldString' in edit tool") + } + } + + if _, exists := argsMap["newString"]; !exists { + if newString, exists := argsMap["new_string"]; exists { + argsMap["newString"] = newString + delete(argsMap, "new_string") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'new_string' to 'newString' in edit tool") + } + } + + if _, exists := argsMap["replaceAll"]; !exists { + if replaceAll, exists := argsMap["replace_all"]; exists { + argsMap["replaceAll"] = replaceAll + delete(argsMap, "replace_all") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'replace_all' to 'replaceAll' in edit tool") } } } diff --git a/backend/internal/service/openai_tool_corrector_test.go b/backend/internal/service/openai_tool_corrector_test.go index 3e885b4b..ff518ea6 100644 --- a/backend/internal/service/openai_tool_corrector_test.go +++ b/backend/internal/service/openai_tool_corrector_test.go @@ -416,22 +416,23 @@ func TestCorrectToolParameters(t *testing.T) { expected map[string]bool // key: 期待存在的参数, value: 
true表示应该存在 }{ { - name: "remove workdir from bash tool", + name: "rename work_dir to workdir in bash tool", input: `{ "tool_calls": [{ "function": { "name": "bash", - "arguments": "{\"command\":\"ls\",\"workdir\":\"/tmp\"}" + "arguments": "{\"command\":\"ls\",\"work_dir\":\"/tmp\"}" } }] }`, expected: map[string]bool{ - "command": true, - "workdir": false, + "command": true, + "workdir": true, + "work_dir": false, }, }, { - name: "rename path to file_path in edit tool", + name: "rename snake_case edit params to camelCase", input: `{ "tool_calls": [{ "function": { @@ -441,10 +442,12 @@ func TestCorrectToolParameters(t *testing.T) { }] }`, expected: map[string]bool{ - "file_path": true, + "filePath": true, "path": false, - "old_string": true, - "new_string": true, + "oldString": true, + "old_string": false, + "newString": true, + "new_string": false, }, }, } diff --git a/backend/internal/service/ops_settings_models.go b/backend/internal/service/ops_settings_models.go index df06f578..ecc62220 100644 --- a/backend/internal/service/ops_settings_models.go +++ b/backend/internal/service/ops_settings_models.go @@ -83,6 +83,7 @@ type OpsAdvancedSettings struct { IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"` IgnoreContextCanceled bool `json:"ignore_context_canceled"` IgnoreNoAvailableAccounts bool `json:"ignore_no_available_accounts"` + IgnoreInvalidApiKeyErrors bool `json:"ignore_invalid_api_key_errors"` AutoRefreshEnabled bool `json:"auto_refresh_enabled"` AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"` } diff --git a/backend/internal/service/pricing_service.go b/backend/internal/service/pricing_service.go index 392fb65c..0ade72cd 100644 --- a/backend/internal/service/pricing_service.go +++ b/backend/internal/service/pricing_service.go @@ -531,8 +531,8 @@ func (s *PricingService) buildModelLookupCandidates(modelLower string) []string func normalizeModelNameForPricing(model string) string { // Common Gemini/VertexAI forms: // - models/gemini-2.0-flash-exp - // - publishers/google/models/gemini-1.5-pro - // - projects/.../locations/.../publishers/google/models/gemini-1.5-pro + // - publishers/google/models/gemini-2.5-pro + // - projects/.../locations/.../publishers/google/models/gemini-2.5-pro model = strings.TrimSpace(model) model = strings.TrimLeft(model, "/") model = strings.TrimPrefix(model, "models/") diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 47a04cf5..6b7ebb07 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -73,10 +73,14 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc return false } - tempMatched := false + // 先尝试临时不可调度规则(401除外) + // 如果匹配成功,直接返回,不执行后续禁用逻辑 if statusCode != 401 { - tempMatched = s.tryTempUnschedulable(ctx, account, statusCode, responseBody) + if s.tryTempUnschedulable(ctx, account, statusCode, responseBody) { + return true + } } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(responseBody)) upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) if upstreamMsg != "" { @@ -84,6 +88,14 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc } switch statusCode { + case 400: + // 只有当错误信息包含 "organization has been disabled" 时才禁用 + if strings.Contains(strings.ToLower(upstreamMsg), "organization has been disabled") { + msg := "Organization disabled (400): " + upstreamMsg + s.handleAuthError(ctx, account, msg) + shouldDisable = true + } + 
// 其他 400 错误(如参数问题)不处理,不禁用账号 case 401: // 对所有 OAuth 账号在 401 错误时调用缓存失效并强制下次刷新 if account.Type == AccountTypeOAuth { @@ -148,9 +160,6 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc } } - if tempMatched { - return true - } return shouldDisable } @@ -190,7 +199,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, start := geminiDailyWindowStart(now) totals, ok := s.getGeminiUsageTotals(account.ID, start, now) if !ok { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return true, err } @@ -237,7 +246,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, if limit > 0 { start := now.Truncate(time.Minute) - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) if err != nil { return true, err } @@ -334,9 +343,48 @@ func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *A // handle429 处理429限流错误 // 解析响应头获取重置时间,标记账号为限流状态 func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header, responseBody []byte) { - // 解析重置时间戳 + // 1. OpenAI 平台:优先尝试解析 x-codex-* 响应头(用于 rate_limit_exceeded) + if account.Platform == PlatformOpenAI { + if resetAt := s.calculateOpenAI429ResetTime(headers); resetAt != nil { + if err := s.accountRepo.SetRateLimited(ctx, account.ID, *resetAt); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + return + } + slog.Info("openai_account_rate_limited", "account_id", account.ID, "reset_at", *resetAt) + return + } + } + + // 2. 尝试从响应头解析重置时间(Anthropic) resetTimestamp := headers.Get("anthropic-ratelimit-unified-reset") + + // 3. 
如果响应头没有,尝试从响应体解析(OpenAI usage_limit_reached, Gemini) if resetTimestamp == "" { + switch account.Platform { + case PlatformOpenAI: + // 尝试解析 OpenAI 的 usage_limit_reached 错误 + if resetAt := parseOpenAIRateLimitResetTime(responseBody); resetAt != nil { + resetTime := time.Unix(*resetAt, 0) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetTime); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + return + } + slog.Info("account_rate_limited", "account_id", account.ID, "platform", account.Platform, "reset_at", resetTime, "reset_in", time.Until(resetTime).Truncate(time.Second)) + return + } + case PlatformGemini, PlatformAntigravity: + // 尝试解析 Gemini 格式(用于其他平台) + if resetAt := ParseGeminiRateLimitResetTime(responseBody); resetAt != nil { + resetTime := time.Unix(*resetAt, 0) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetTime); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + return + } + slog.Info("account_rate_limited", "account_id", account.ID, "platform", account.Platform, "reset_at", resetTime, "reset_in", time.Until(resetTime).Truncate(time.Second)) + return + } + } + // 没有重置时间,使用默认5分钟 resetAt := time.Now().Add(5 * time.Minute) if s.shouldScopeClaudeSonnetRateLimit(account, responseBody) { @@ -347,6 +395,7 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head } return } + slog.Warn("rate_limit_no_reset_time", "account_id", account.ID, "platform", account.Platform, "using_default", "5m") if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) } @@ -410,6 +459,108 @@ func (s *RateLimitService) shouldScopeClaudeSonnetRateLimit(account *Account, re return strings.Contains(msg, "sonnet") } +// calculateOpenAI429ResetTime 从 OpenAI 429 响应头计算正确的重置时间 +// 返回 nil 表示无法从响应头中确定重置时间 +func (s *RateLimitService) calculateOpenAI429ResetTime(headers http.Header) *time.Time { + snapshot := ParseCodexRateLimitHeaders(headers) + if snapshot == nil { + return nil + } + + normalized := snapshot.Normalize() + if normalized == nil { + return nil + } + + now := time.Now() + + // 判断哪个限制被触发(used_percent >= 100) + is7dExhausted := normalized.Used7dPercent != nil && *normalized.Used7dPercent >= 100 + is5hExhausted := normalized.Used5hPercent != nil && *normalized.Used5hPercent >= 100 + + // 优先使用被触发限制的重置时间 + if is7dExhausted && normalized.Reset7dSeconds != nil { + resetAt := now.Add(time.Duration(*normalized.Reset7dSeconds) * time.Second) + slog.Info("openai_429_7d_limit_exhausted", "reset_after_seconds", *normalized.Reset7dSeconds, "reset_at", resetAt) + return &resetAt + } + if is5hExhausted && normalized.Reset5hSeconds != nil { + resetAt := now.Add(time.Duration(*normalized.Reset5hSeconds) * time.Second) + slog.Info("openai_429_5h_limit_exhausted", "reset_after_seconds", *normalized.Reset5hSeconds, "reset_at", resetAt) + return &resetAt + } + + // 都未达到100%但收到429,使用较长的重置时间 + var maxResetSecs int + if normalized.Reset7dSeconds != nil && *normalized.Reset7dSeconds > maxResetSecs { + maxResetSecs = *normalized.Reset7dSeconds + } + if normalized.Reset5hSeconds != nil && *normalized.Reset5hSeconds > maxResetSecs { + maxResetSecs = *normalized.Reset5hSeconds + } + if maxResetSecs > 0 { + resetAt := now.Add(time.Duration(maxResetSecs) * time.Second) + slog.Info("openai_429_using_max_reset", "max_reset_seconds", maxResetSecs, "reset_at", resetAt) + return &resetAt + } + + return nil +} + 
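+// Worked example (values mirror TestCalculateOpenAI429ResetTime_UserProvidedScenario;
+// illustrative, not normative): a 429 arriving with
+//
+//	x-codex-primary-used-percent: 100
+//	x-codex-primary-reset-after-seconds: 384607
+//	x-codex-primary-window-minutes: 10080
+//
+// is keyed to the 7d bucket by its window size; since that bucket is at 100%,
+// calculateOpenAI429ResetTime returns now+384607s (~4.45 days) instead of
+// letting handle429 fall through to the 5-minute default.
+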
+// parseOpenAIRateLimitResetTime 解析 OpenAI 格式的 429 响应,返回重置时间的 Unix 时间戳 +// OpenAI 的 usage_limit_reached 错误格式: +// +// { +// "error": { +// "message": "The usage limit has been reached", +// "type": "usage_limit_reached", +// "resets_at": 1769404154, +// "resets_in_seconds": 133107 +// } +// } +func parseOpenAIRateLimitResetTime(body []byte) *int64 { + var parsed map[string]any + if err := json.Unmarshal(body, &parsed); err != nil { + return nil + } + + errObj, ok := parsed["error"].(map[string]any) + if !ok { + return nil + } + + // 检查是否为 usage_limit_reached 或 rate_limit_exceeded 类型 + errType, _ := errObj["type"].(string) + if errType != "usage_limit_reached" && errType != "rate_limit_exceeded" { + return nil + } + + // 优先使用 resets_at(Unix 时间戳) + if resetsAt, ok := errObj["resets_at"].(float64); ok { + ts := int64(resetsAt) + return &ts + } + if resetsAt, ok := errObj["resets_at"].(string); ok { + if ts, err := strconv.ParseInt(resetsAt, 10, 64); err == nil { + return &ts + } + } + + // 如果没有 resets_at,尝试使用 resets_in_seconds + if resetsInSeconds, ok := errObj["resets_in_seconds"].(float64); ok { + ts := time.Now().Unix() + int64(resetsInSeconds) + return &ts + } + if resetsInSeconds, ok := errObj["resets_in_seconds"].(string); ok { + if sec, err := strconv.ParseInt(resetsInSeconds, 10, 64); err == nil { + ts := time.Now().Unix() + sec + return &ts + } + } + + return nil +} + // handle529 处理529过载错误 // 根据配置设置过载冷却时间 func (s *RateLimitService) handle529(ctx context.Context, account *Account) { diff --git a/backend/internal/service/ratelimit_service_openai_test.go b/backend/internal/service/ratelimit_service_openai_test.go new file mode 100644 index 00000000..00902068 --- /dev/null +++ b/backend/internal/service/ratelimit_service_openai_test.go @@ -0,0 +1,364 @@ +package service + +import ( + "net/http" + "testing" + "time" +) + +func TestCalculateOpenAI429ResetTime_7dExhausted(t *testing.T) { + svc := &RateLimitService{} + + // Simulate headers when 7d limit is exhausted (100% used) + // Primary = 7d (10080 minutes), Secondary = 5h (300 minutes) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") + headers.Set("x-codex-primary-reset-after-seconds", "384607") // ~4.5 days + headers.Set("x-codex-primary-window-minutes", "10080") // 7 days + headers.Set("x-codex-secondary-used-percent", "3") + headers.Set("x-codex-secondary-reset-after-seconds", "17369") // ~4.8 hours + headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should be approximately 384607 seconds from now + expectedDuration := 384607 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_5hExhausted(t *testing.T) { + svc := &RateLimitService{} + + // Simulate headers when 5h limit is exhausted (100% used) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "50") + headers.Set("x-codex-primary-reset-after-seconds", "500000") + headers.Set("x-codex-primary-window-minutes", "10080") // 7 days + headers.Set("x-codex-secondary-used-percent", "100") + headers.Set("x-codex-secondary-reset-after-seconds", "3600") // 1 hour + 
headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should be approximately 3600 seconds from now + expectedDuration := 3600 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_NeitherExhausted_UsesMax(t *testing.T) { + svc := &RateLimitService{} + + // Neither limit at 100%, should use the longer reset time + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "80") + headers.Set("x-codex-primary-reset-after-seconds", "100000") + headers.Set("x-codex-primary-window-minutes", "10080") + headers.Set("x-codex-secondary-used-percent", "90") + headers.Set("x-codex-secondary-reset-after-seconds", "5000") + headers.Set("x-codex-secondary-window-minutes", "300") + + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should use the max (100000 seconds from 7d window) + expectedDuration := 100000 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestCalculateOpenAI429ResetTime_NoCodexHeaders(t *testing.T) { + svc := &RateLimitService{} + + // No codex headers at all + headers := http.Header{} + headers.Set("content-type", "application/json") + + resetAt := svc.calculateOpenAI429ResetTime(headers) + + if resetAt != nil { + t.Errorf("expected nil resetAt when no codex headers, got %v", resetAt) + } +} + +func TestCalculateOpenAI429ResetTime_ReversedWindowOrder(t *testing.T) { + svc := &RateLimitService{} + + // Test when OpenAI sends primary as 5h and secondary as 7d (reversed) + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") // This is 5h + headers.Set("x-codex-primary-reset-after-seconds", "3600") // 1 hour + headers.Set("x-codex-primary-window-minutes", "300") // 5 hours - smaller! + headers.Set("x-codex-secondary-used-percent", "50") + headers.Set("x-codex-secondary-reset-after-seconds", "500000") + headers.Set("x-codex-secondary-window-minutes", "10080") // 7 days - larger! 
+ + before := time.Now() + resetAt := svc.calculateOpenAI429ResetTime(headers) + after := time.Now() + + if resetAt == nil { + t.Fatal("expected non-nil resetAt") + } + + // Should correctly identify that primary is 5h (smaller window) and use its reset time + expectedDuration := 3600 * time.Second + minExpected := before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } +} + +func TestNormalizedCodexLimits(t *testing.T) { + // Test the Normalize() method directly + pUsed := 100.0 + pReset := 384607 + pWindow := 10080 + sUsed := 3.0 + sReset := 17369 + sWindow := 300 + + snapshot := &OpenAICodexUsageSnapshot{ + PrimaryUsedPercent: &pUsed, + PrimaryResetAfterSeconds: &pReset, + PrimaryWindowMinutes: &pWindow, + SecondaryUsedPercent: &sUsed, + SecondaryResetAfterSeconds: &sReset, + SecondaryWindowMinutes: &sWindow, + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Primary has larger window (10080 > 300), so primary should be 7d + if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 100.0 { + t.Errorf("expected Used7dPercent=100, got %v", normalized.Used7dPercent) + } + if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 384607 { + t.Errorf("expected Reset7dSeconds=384607, got %v", normalized.Reset7dSeconds) + } + if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 3.0 { + t.Errorf("expected Used5hPercent=3, got %v", normalized.Used5hPercent) + } + if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 17369 { + t.Errorf("expected Reset5hSeconds=17369, got %v", normalized.Reset5hSeconds) + } +} + +func TestNormalizedCodexLimits_OnlyPrimaryData(t *testing.T) { + // Test when only primary has data, no window_minutes + pUsed := 80.0 + pReset := 50000 + + snapshot := &OpenAICodexUsageSnapshot{ + PrimaryUsedPercent: &pUsed, + PrimaryResetAfterSeconds: &pReset, + // No window_minutes, no secondary data + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Legacy assumption: primary=7d, secondary=5h + if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 80.0 { + t.Errorf("expected Used7dPercent=80, got %v", normalized.Used7dPercent) + } + if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 50000 { + t.Errorf("expected Reset7dSeconds=50000, got %v", normalized.Reset7dSeconds) + } + // Secondary (5h) should be nil + if normalized.Used5hPercent != nil { + t.Errorf("expected Used5hPercent=nil, got %v", *normalized.Used5hPercent) + } + if normalized.Reset5hSeconds != nil { + t.Errorf("expected Reset5hSeconds=nil, got %v", *normalized.Reset5hSeconds) + } +} + +func TestNormalizedCodexLimits_OnlySecondaryData(t *testing.T) { + // Test when only secondary has data, no window_minutes + sUsed := 60.0 + sReset := 3000 + + snapshot := &OpenAICodexUsageSnapshot{ + SecondaryUsedPercent: &sUsed, + SecondaryResetAfterSeconds: &sReset, + // No window_minutes, no primary data + } + + normalized := snapshot.Normalize() + if normalized == nil { + t.Fatal("expected non-nil normalized") + } + + // Legacy assumption: primary=7d, secondary=5h + // So secondary goes to 5h + if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 60.0 { + t.Errorf("expected Used5hPercent=60, got %v", normalized.Used5hPercent) + } + 
if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 3000 {
+		t.Errorf("expected Reset5hSeconds=3000, got %v", normalized.Reset5hSeconds)
+	}
+	// Primary (7d) should be nil
+	if normalized.Used7dPercent != nil {
+		t.Errorf("expected Used7dPercent=nil, got %v", *normalized.Used7dPercent)
+	}
+}
+
+func TestNormalizedCodexLimits_BothDataNoWindowMinutes(t *testing.T) {
+	// Test when both have data but no window_minutes
+	pUsed := 100.0
+	pReset := 400000
+	sUsed := 50.0
+	sReset := 10000
+
+	snapshot := &OpenAICodexUsageSnapshot{
+		PrimaryUsedPercent:         &pUsed,
+		PrimaryResetAfterSeconds:   &pReset,
+		SecondaryUsedPercent:       &sUsed,
+		SecondaryResetAfterSeconds: &sReset,
+		// No window_minutes
+	}
+
+	normalized := snapshot.Normalize()
+	if normalized == nil {
+		t.Fatal("expected non-nil normalized")
+	}
+
+	// Legacy assumption: primary=7d, secondary=5h
+	if normalized.Used7dPercent == nil || *normalized.Used7dPercent != 100.0 {
+		t.Errorf("expected Used7dPercent=100, got %v", normalized.Used7dPercent)
+	}
+	if normalized.Reset7dSeconds == nil || *normalized.Reset7dSeconds != 400000 {
+		t.Errorf("expected Reset7dSeconds=400000, got %v", normalized.Reset7dSeconds)
+	}
+	if normalized.Used5hPercent == nil || *normalized.Used5hPercent != 50.0 {
+		t.Errorf("expected Used5hPercent=50, got %v", normalized.Used5hPercent)
+	}
+	if normalized.Reset5hSeconds == nil || *normalized.Reset5hSeconds != 10000 {
+		t.Errorf("expected Reset5hSeconds=10000, got %v", normalized.Reset5hSeconds)
+	}
+}
+
+func TestHandle429_AnthropicPlatformUnaffected(t *testing.T) {
+	// Verify that Anthropic platform accounts still use the original logic
+	// This test ensures we don't break existing Claude account rate limiting
+
+	svc := &RateLimitService{}
+
+	// Simulate Anthropic 429 headers
+	headers := http.Header{}
+	headers.Set("anthropic-ratelimit-unified-reset", "1737820800") // A future Unix timestamp
+
+	// calculateOpenAI429ResetTime should return nil here because it only
+	// parses the x-codex-* headers, which Anthropic responses never carry.
+	resetAt := svc.calculateOpenAI429ResetTime(headers)
+
+	// Should return nil since there are no x-codex-* headers
+	if resetAt != nil {
+		t.Errorf("expected nil for Anthropic headers, got %v", resetAt)
+	}
+}
+
+func TestCalculateOpenAI429ResetTime_UserProvidedScenario(t *testing.T) {
+	// This is the exact scenario from the user:
+	// codex_7d_used_percent: 100
+	// codex_7d_reset_after_seconds: 384607 (resets after ~4.5 days)
+	// codex_5h_used_percent: 3
+	// codex_5h_reset_after_seconds: 17369 (resets after ~4.8 hours)
+
+	svc := &RateLimitService{}
+
+	// Simulate headers matching user's data
+	// Note: We need to map the canonical 5h/7d back to primary/secondary
+	// Based on typical OpenAI behavior: primary=7d (larger window), secondary=5h (smaller window)
+	headers := http.Header{}
+	headers.Set("x-codex-primary-used-percent", "100")
+	headers.Set("x-codex-primary-reset-after-seconds", "384607")
+	headers.Set("x-codex-primary-window-minutes", "10080") // 7 days = 10080 minutes
+	headers.Set("x-codex-secondary-used-percent", "3")
+	headers.Set("x-codex-secondary-reset-after-seconds", "17369")
+	headers.Set("x-codex-secondary-window-minutes", "300") // 5 hours = 300 minutes
+
+	before := time.Now()
+	resetAt := svc.calculateOpenAI429ResetTime(headers)
+	after := time.Now()
+
+	if resetAt == nil {
+		t.Fatal("expected non-nil resetAt for user scenario")
+	}
+
+	// Should use the 7d reset time (384607 seconds) since 7d limit is exhausted (100%)
+	expectedDuration := 384607 * time.Second
+	minExpected := 
before.Add(expectedDuration) + maxExpected := after.Add(expectedDuration) + + if resetAt.Before(minExpected) || resetAt.After(maxExpected) { + t.Errorf("resetAt %v not in expected range [%v, %v]", resetAt, minExpected, maxExpected) + } + + // Verify it's approximately 4.45 days (384607 seconds) + duration := resetAt.Sub(before) + actualDays := duration.Hours() / 24.0 + + // 384607 / 86400 = ~4.45 days + if actualDays < 4.4 || actualDays > 4.5 { + t.Errorf("expected ~4.45 days, got %.2f days", actualDays) + } + + t.Logf("User scenario: reset_at=%v, duration=%.2f days", resetAt, actualDays) +} + +func TestCalculateOpenAI429ResetTime_5MinFallbackWhenNoReset(t *testing.T) { + // Test that we return nil when there's used_percent but no reset_after_seconds + // This should cause the caller to use the default 5-minute fallback + + svc := &RateLimitService{} + + headers := http.Header{} + headers.Set("x-codex-primary-used-percent", "100") + // No reset_after_seconds! + + resetAt := svc.calculateOpenAI429ResetTime(headers) + + // Should return nil since there's no reset time available + if resetAt != nil { + t.Errorf("expected nil when no reset_after_seconds, got %v", resetAt) + } +} diff --git a/backend/internal/service/session_limit_cache.go b/backend/internal/service/session_limit_cache.go index f6f0c26a..5482d610 100644 --- a/backend/internal/service/session_limit_cache.go +++ b/backend/internal/service/session_limit_cache.go @@ -38,8 +38,9 @@ type SessionLimitCache interface { GetActiveSessionCount(ctx context.Context, accountID int64) (int, error) // GetActiveSessionCountBatch 批量获取多个账号的活跃会话数 + // idleTimeouts: 每个账号的空闲超时时间配置,key 为 accountID;若为 nil 或某账号不在其中,则使用默认超时 // 返回 map[accountID]count,查询失败的账号不在 map 中 - GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) + GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64, idleTimeouts map[int64]time.Duration) (map[int64]int, error) // IsSessionActive 检查特定会话是否活跃(未过期) IsSessionActive(ctx context.Context, accountID int64, sessionUUID string) (bool, error) diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go index 0a7426f8..60ae9543 100644 --- a/backend/internal/service/setting_service.go +++ b/backend/internal/service/setting_service.go @@ -60,6 +60,9 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings keys := []string{ SettingKeyRegistrationEnabled, SettingKeyEmailVerifyEnabled, + SettingKeyPromoCodeEnabled, + SettingKeyPasswordResetEnabled, + SettingKeyTotpEnabled, SettingKeyTurnstileEnabled, SettingKeyTurnstileSiteKey, SettingKeySiteName, @@ -69,6 +72,9 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings SettingKeyContactInfo, SettingKeyDocURL, SettingKeyHomeContent, + SettingKeyHideCcsImportButton, + SettingKeyPurchaseSubscriptionEnabled, + SettingKeyPurchaseSubscriptionURL, SettingKeyLinuxDoConnectEnabled, } @@ -84,19 +90,29 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings linuxDoEnabled = s.cfg != nil && s.cfg.LinuxDo.Enabled } + // Password reset requires email verification to be enabled + emailVerifyEnabled := settings[SettingKeyEmailVerifyEnabled] == "true" + passwordResetEnabled := emailVerifyEnabled && settings[SettingKeyPasswordResetEnabled] == "true" + return &PublicSettings{ - RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", - EmailVerifyEnabled: settings[SettingKeyEmailVerifyEnabled] == "true", - TurnstileEnabled: 
settings[SettingKeyTurnstileEnabled] == "true", - TurnstileSiteKey: settings[SettingKeyTurnstileSiteKey], - SiteName: s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"), - SiteLogo: settings[SettingKeySiteLogo], - SiteSubtitle: s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"), - APIBaseURL: settings[SettingKeyAPIBaseURL], - ContactInfo: settings[SettingKeyContactInfo], - DocURL: settings[SettingKeyDocURL], - HomeContent: settings[SettingKeyHomeContent], - LinuxDoOAuthEnabled: linuxDoEnabled, + RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", + EmailVerifyEnabled: emailVerifyEnabled, + PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 + PasswordResetEnabled: passwordResetEnabled, + TotpEnabled: settings[SettingKeyTotpEnabled] == "true", + TurnstileEnabled: settings[SettingKeyTurnstileEnabled] == "true", + TurnstileSiteKey: settings[SettingKeyTurnstileSiteKey], + SiteName: s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"), + SiteLogo: settings[SettingKeySiteLogo], + SiteSubtitle: s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"), + APIBaseURL: settings[SettingKeyAPIBaseURL], + ContactInfo: settings[SettingKeyContactInfo], + DocURL: settings[SettingKeyDocURL], + HomeContent: settings[SettingKeyHomeContent], + HideCcsImportButton: settings[SettingKeyHideCcsImportButton] == "true", + PurchaseSubscriptionEnabled: settings[SettingKeyPurchaseSubscriptionEnabled] == "true", + PurchaseSubscriptionURL: strings.TrimSpace(settings[SettingKeyPurchaseSubscriptionURL]), + LinuxDoOAuthEnabled: linuxDoEnabled, }, nil } @@ -121,33 +137,45 @@ func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any // Return a struct that matches the frontend's expected format return &struct { - RegistrationEnabled bool `json:"registration_enabled"` - EmailVerifyEnabled bool `json:"email_verify_enabled"` - TurnstileEnabled bool `json:"turnstile_enabled"` - TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` - SiteName string `json:"site_name"` - SiteLogo string `json:"site_logo,omitempty"` - SiteSubtitle string `json:"site_subtitle,omitempty"` - APIBaseURL string `json:"api_base_url,omitempty"` - ContactInfo string `json:"contact_info,omitempty"` - DocURL string `json:"doc_url,omitempty"` - HomeContent string `json:"home_content,omitempty"` - LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` - Version string `json:"version,omitempty"` + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + PromoCodeEnabled bool `json:"promo_code_enabled"` + PasswordResetEnabled bool `json:"password_reset_enabled"` + TotpEnabled bool `json:"totp_enabled"` + TurnstileEnabled bool `json:"turnstile_enabled"` + TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` + SiteName string `json:"site_name"` + SiteLogo string `json:"site_logo,omitempty"` + SiteSubtitle string `json:"site_subtitle,omitempty"` + APIBaseURL string `json:"api_base_url,omitempty"` + ContactInfo string `json:"contact_info,omitempty"` + DocURL string `json:"doc_url,omitempty"` + HomeContent string `json:"home_content,omitempty"` + HideCcsImportButton bool `json:"hide_ccs_import_button"` + PurchaseSubscriptionEnabled bool `json:"purchase_subscription_enabled"` + PurchaseSubscriptionURL string `json:"purchase_subscription_url,omitempty"` + LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` + Version string 
`json:"version,omitempty"` }{ - RegistrationEnabled: settings.RegistrationEnabled, - EmailVerifyEnabled: settings.EmailVerifyEnabled, - TurnstileEnabled: settings.TurnstileEnabled, - TurnstileSiteKey: settings.TurnstileSiteKey, - SiteName: settings.SiteName, - SiteLogo: settings.SiteLogo, - SiteSubtitle: settings.SiteSubtitle, - APIBaseURL: settings.APIBaseURL, - ContactInfo: settings.ContactInfo, - DocURL: settings.DocURL, - HomeContent: settings.HomeContent, - LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, - Version: s.version, + RegistrationEnabled: settings.RegistrationEnabled, + EmailVerifyEnabled: settings.EmailVerifyEnabled, + PromoCodeEnabled: settings.PromoCodeEnabled, + PasswordResetEnabled: settings.PasswordResetEnabled, + TotpEnabled: settings.TotpEnabled, + TurnstileEnabled: settings.TurnstileEnabled, + TurnstileSiteKey: settings.TurnstileSiteKey, + SiteName: settings.SiteName, + SiteLogo: settings.SiteLogo, + SiteSubtitle: settings.SiteSubtitle, + APIBaseURL: settings.APIBaseURL, + ContactInfo: settings.ContactInfo, + DocURL: settings.DocURL, + HomeContent: settings.HomeContent, + HideCcsImportButton: settings.HideCcsImportButton, + PurchaseSubscriptionEnabled: settings.PurchaseSubscriptionEnabled, + PurchaseSubscriptionURL: settings.PurchaseSubscriptionURL, + LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, + Version: s.version, }, nil } @@ -158,6 +186,9 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet // 注册设置 updates[SettingKeyRegistrationEnabled] = strconv.FormatBool(settings.RegistrationEnabled) updates[SettingKeyEmailVerifyEnabled] = strconv.FormatBool(settings.EmailVerifyEnabled) + updates[SettingKeyPromoCodeEnabled] = strconv.FormatBool(settings.PromoCodeEnabled) + updates[SettingKeyPasswordResetEnabled] = strconv.FormatBool(settings.PasswordResetEnabled) + updates[SettingKeyTotpEnabled] = strconv.FormatBool(settings.TotpEnabled) // 邮件服务设置(只有非空才更新密码) updates[SettingKeySMTPHost] = settings.SMTPHost @@ -193,6 +224,9 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyContactInfo] = settings.ContactInfo updates[SettingKeyDocURL] = settings.DocURL updates[SettingKeyHomeContent] = settings.HomeContent + updates[SettingKeyHideCcsImportButton] = strconv.FormatBool(settings.HideCcsImportButton) + updates[SettingKeyPurchaseSubscriptionEnabled] = strconv.FormatBool(settings.PurchaseSubscriptionEnabled) + updates[SettingKeyPurchaseSubscriptionURL] = strings.TrimSpace(settings.PurchaseSubscriptionURL) // 默认配置 updates[SettingKeyDefaultConcurrency] = strconv.Itoa(settings.DefaultConcurrency) @@ -243,6 +277,44 @@ func (s *SettingService) IsEmailVerifyEnabled(ctx context.Context) bool { return value == "true" } +// IsPromoCodeEnabled 检查是否启用优惠码功能 +func (s *SettingService) IsPromoCodeEnabled(ctx context.Context) bool { + value, err := s.settingRepo.GetValue(ctx, SettingKeyPromoCodeEnabled) + if err != nil { + return true // 默认启用 + } + return value != "false" +} + +// IsPasswordResetEnabled 检查是否启用密码重置功能 +// 要求:必须同时开启邮件验证 +func (s *SettingService) IsPasswordResetEnabled(ctx context.Context) bool { + // Password reset requires email verification to be enabled + if !s.IsEmailVerifyEnabled(ctx) { + return false + } + value, err := s.settingRepo.GetValue(ctx, SettingKeyPasswordResetEnabled) + if err != nil { + return false // 默认关闭 + } + return value == "true" +} + +// IsTotpEnabled 检查是否启用 TOTP 双因素认证功能 +func (s *SettingService) IsTotpEnabled(ctx context.Context) bool { + value, err := 
s.settingRepo.GetValue(ctx, SettingKeyTotpEnabled) + if err != nil { + return false // 默认关闭 + } + return value == "true" +} + +// IsTotpEncryptionKeyConfigured 检查 TOTP 加密密钥是否已手动配置 +// 只有手动配置了密钥才允许在管理后台启用 TOTP 功能 +func (s *SettingService) IsTotpEncryptionKeyConfigured() bool { + return s.cfg.Totp.EncryptionKeyConfigured +} + // GetSiteName 获取网站名称 func (s *SettingService) GetSiteName(ctx context.Context) string { value, err := s.settingRepo.GetValue(ctx, SettingKeySiteName) @@ -290,14 +362,17 @@ func (s *SettingService) InitializeDefaultSettings(ctx context.Context) error { // 初始化默认设置 defaults := map[string]string{ - SettingKeyRegistrationEnabled: "true", - SettingKeyEmailVerifyEnabled: "false", - SettingKeySiteName: "Sub2API", - SettingKeySiteLogo: "", - SettingKeyDefaultConcurrency: strconv.Itoa(s.cfg.Default.UserConcurrency), - SettingKeyDefaultBalance: strconv.FormatFloat(s.cfg.Default.UserBalance, 'f', 8, 64), - SettingKeySMTPPort: "587", - SettingKeySMTPUseTLS: "false", + SettingKeyRegistrationEnabled: "true", + SettingKeyEmailVerifyEnabled: "false", + SettingKeyPromoCodeEnabled: "true", // 默认启用优惠码功能 + SettingKeySiteName: "Sub2API", + SettingKeySiteLogo: "", + SettingKeyPurchaseSubscriptionEnabled: "false", + SettingKeyPurchaseSubscriptionURL: "", + SettingKeyDefaultConcurrency: strconv.Itoa(s.cfg.Default.UserConcurrency), + SettingKeyDefaultBalance: strconv.FormatFloat(s.cfg.Default.UserBalance, 'f', 8, 64), + SettingKeySMTPPort: "587", + SettingKeySMTPUseTLS: "false", // Model fallback defaults SettingKeyEnableModelFallback: "false", SettingKeyFallbackModelAnthropic: "claude-3-5-sonnet-20241022", @@ -320,9 +395,13 @@ func (s *SettingService) InitializeDefaultSettings(ctx context.Context) error { // parseSettings 解析设置到结构体 func (s *SettingService) parseSettings(settings map[string]string) *SystemSettings { + emailVerifyEnabled := settings[SettingKeyEmailVerifyEnabled] == "true" result := &SystemSettings{ RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true", - EmailVerifyEnabled: settings[SettingKeyEmailVerifyEnabled] == "true", + EmailVerifyEnabled: emailVerifyEnabled, + PromoCodeEnabled: settings[SettingKeyPromoCodeEnabled] != "false", // 默认启用 + PasswordResetEnabled: emailVerifyEnabled && settings[SettingKeyPasswordResetEnabled] == "true", + TotpEnabled: settings[SettingKeyTotpEnabled] == "true", SMTPHost: settings[SettingKeySMTPHost], SMTPUsername: settings[SettingKeySMTPUsername], SMTPFrom: settings[SettingKeySMTPFrom], @@ -339,6 +418,9 @@ func (s *SettingService) parseSettings(settings map[string]string) *SystemSettin ContactInfo: settings[SettingKeyContactInfo], DocURL: settings[SettingKeyDocURL], HomeContent: settings[SettingKeyHomeContent], + HideCcsImportButton: settings[SettingKeyHideCcsImportButton] == "true", + PurchaseSubscriptionEnabled: settings[SettingKeyPurchaseSubscriptionEnabled] == "true", + PurchaseSubscriptionURL: strings.TrimSpace(settings[SettingKeyPurchaseSubscriptionURL]), } // 解析整数类型 diff --git a/backend/internal/service/settings_view.go b/backend/internal/service/settings_view.go index e4ee2826..358911dc 100644 --- a/backend/internal/service/settings_view.go +++ b/backend/internal/service/settings_view.go @@ -1,8 +1,11 @@ package service type SystemSettings struct { - RegistrationEnabled bool - EmailVerifyEnabled bool + RegistrationEnabled bool + EmailVerifyEnabled bool + PromoCodeEnabled bool + PasswordResetEnabled bool + TotpEnabled bool // TOTP 双因素认证 SMTPHost string SMTPPort int @@ -25,13 +28,16 @@ type SystemSettings struct { 
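Note: the password-reset gate is applied twice, once inside IsPasswordResetEnabled and again when parseSettings derives PasswordResetEnabled, so both read paths agree even if the stored flag says "true". A minimal unit-test sketch of that rule (an illustration, assuming parseSettings tolerates absent keys, which its map-lookup style suggests):

//go:build unit

package service

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Sketch only: PasswordResetEnabled must stay false while email verification is off.
func TestParseSettings_PasswordResetRequiresEmailVerify(t *testing.T) {
	s := &SettingService{}
	settings := map[string]string{
		SettingKeyEmailVerifyEnabled:   "false",
		SettingKeyPasswordResetEnabled: "true",
	}
	require.False(t, s.parseSettings(settings).PasswordResetEnabled)

	settings[SettingKeyEmailVerifyEnabled] = "true"
	require.True(t, s.parseSettings(settings).PasswordResetEnabled)
}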
LinuxDoConnectClientSecretConfigured bool LinuxDoConnectRedirectURL string - SiteName string - SiteLogo string - SiteSubtitle string - APIBaseURL string - ContactInfo string - DocURL string - HomeContent string + SiteName string + SiteLogo string + SiteSubtitle string + APIBaseURL string + ContactInfo string + DocURL string + HomeContent string + HideCcsImportButton bool + PurchaseSubscriptionEnabled bool + PurchaseSubscriptionURL string DefaultConcurrency int DefaultBalance float64 @@ -55,17 +61,25 @@ type SystemSettings struct { } type PublicSettings struct { - RegistrationEnabled bool - EmailVerifyEnabled bool - TurnstileEnabled bool - TurnstileSiteKey string - SiteName string - SiteLogo string - SiteSubtitle string - APIBaseURL string - ContactInfo string - DocURL string - HomeContent string + RegistrationEnabled bool + EmailVerifyEnabled bool + PromoCodeEnabled bool + PasswordResetEnabled bool + TotpEnabled bool // TOTP 双因素认证 + TurnstileEnabled bool + TurnstileSiteKey string + SiteName string + SiteLogo string + SiteSubtitle string + APIBaseURL string + ContactInfo string + DocURL string + HomeContent string + HideCcsImportButton bool + + PurchaseSubscriptionEnabled bool + PurchaseSubscriptionURL string + LinuxDoOAuthEnabled bool Version string } diff --git a/backend/internal/service/sticky_session_test.go b/backend/internal/service/sticky_session_test.go new file mode 100644 index 00000000..4bd06b7b --- /dev/null +++ b/backend/internal/service/sticky_session_test.go @@ -0,0 +1,54 @@ +//go:build unit + +// Package service 提供 API 网关核心服务。 +// 本文件包含 shouldClearStickySession 函数的单元测试, +// 验证粘性会话清理逻辑在各种账号状态下的正确行为。 +// +// This file contains unit tests for the shouldClearStickySession function, +// verifying correct sticky session clearing behavior under various account states. +package service + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestShouldClearStickySession 测试粘性会话清理判断逻辑。 +// 验证在以下情况下是否正确判断需要清理粘性会话: +// - nil 账号:不清理(返回 false) +// - 状态为错误或禁用:清理 +// - 不可调度:清理 +// - 临时不可调度且未过期:清理 +// - 临时不可调度已过期:不清理 +// - 正常可调度状态:不清理 +// +// TestShouldClearStickySession tests the sticky session clearing logic. +// Verifies correct behavior for various account states including: +// nil account, error/disabled status, unschedulable, temporary unschedulable. 
+func TestShouldClearStickySession(t *testing.T) { + now := time.Now() + future := now.Add(1 * time.Hour) + past := now.Add(-1 * time.Hour) + + tests := []struct { + name string + account *Account + want bool + }{ + {name: "nil account", account: nil, want: false}, + {name: "status error", account: &Account{Status: StatusError, Schedulable: true}, want: true}, + {name: "status disabled", account: &Account{Status: StatusDisabled, Schedulable: true}, want: true}, + {name: "schedulable false", account: &Account{Status: StatusActive, Schedulable: false}, want: true}, + {name: "temp unschedulable", account: &Account{Status: StatusActive, Schedulable: true, TempUnschedulableUntil: &future}, want: true}, + {name: "temp unschedulable expired", account: &Account{Status: StatusActive, Schedulable: true, TempUnschedulableUntil: &past}, want: false}, + {name: "active schedulable", account: &Account{Status: StatusActive, Schedulable: true}, want: false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, shouldClearStickySession(tt.account)) + }) + } +} diff --git a/backend/internal/service/subscription_expiry_service.go b/backend/internal/service/subscription_expiry_service.go new file mode 100644 index 00000000..ce6b32b8 --- /dev/null +++ b/backend/internal/service/subscription_expiry_service.go @@ -0,0 +1,71 @@ +package service + +import ( + "context" + "log" + "sync" + "time" +) + +// SubscriptionExpiryService periodically updates expired subscription status. +type SubscriptionExpiryService struct { + userSubRepo UserSubscriptionRepository + interval time.Duration + stopCh chan struct{} + stopOnce sync.Once + wg sync.WaitGroup +} + +func NewSubscriptionExpiryService(userSubRepo UserSubscriptionRepository, interval time.Duration) *SubscriptionExpiryService { + return &SubscriptionExpiryService{ + userSubRepo: userSubRepo, + interval: interval, + stopCh: make(chan struct{}), + } +} + +func (s *SubscriptionExpiryService) Start() { + if s == nil || s.userSubRepo == nil || s.interval <= 0 { + return + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + ticker := time.NewTicker(s.interval) + defer ticker.Stop() + + s.runOnce() + for { + select { + case <-ticker.C: + s.runOnce() + case <-s.stopCh: + return + } + } + }() +} + +func (s *SubscriptionExpiryService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + close(s.stopCh) + }) + s.wg.Wait() +} + +func (s *SubscriptionExpiryService) runOnce() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + updated, err := s.userSubRepo.BatchUpdateExpiredStatus(ctx) + if err != nil { + log.Printf("[SubscriptionExpiry] Update expired subscriptions failed: %v", err) + return + } + if updated > 0 { + log.Printf("[SubscriptionExpiry] Updated %d expired subscriptions", updated) + } +} diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go index d960c86f..3c42852e 100644 --- a/backend/internal/service/subscription_service.go +++ b/backend/internal/service/subscription_service.go @@ -27,6 +27,7 @@ var ( ErrWeeklyLimitExceeded = infraerrors.TooManyRequests("WEEKLY_LIMIT_EXCEEDED", "weekly usage limit exceeded") ErrMonthlyLimitExceeded = infraerrors.TooManyRequests("MONTHLY_LIMIT_EXCEEDED", "monthly usage limit exceeded") ErrSubscriptionNilInput = infraerrors.BadRequest("SUBSCRIPTION_NIL_INPUT", "subscription input cannot be nil") + ErrAdjustWouldExpire = infraerrors.BadRequest("ADJUST_WOULD_EXPIRE", 
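Aside: SubscriptionExpiryService guards itself (a nil receiver, nil repo, or non-positive interval all make Start a no-op), so callers can wire it unconditionally. A usage sketch in package service; the interval is an assumption, and the real wiring lives outside this patch:

// wireSubscriptionExpiry shows the intended lifecycle: Start runs one pass
// immediately, then one per tick; the returned Stop is idempotent (stopOnce)
// and blocks until the worker goroutine exits (wg.Wait).
func wireSubscriptionExpiry(repo UserSubscriptionRepository) (stop func()) {
	svc := NewSubscriptionExpiryService(repo, 5*time.Minute) // interval chosen for illustration
	svc.Start()
	return svc.Stop
}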
"adjustment would result in expired subscription (remaining days must be > 0)") ) // SubscriptionService 订阅服务 @@ -308,24 +309,48 @@ func (s *SubscriptionService) RevokeSubscription(ctx context.Context, subscripti return nil } -// ExtendSubscription 延长订阅 +// ExtendSubscription 调整订阅时长(正数延长,负数缩短) func (s *SubscriptionService) ExtendSubscription(ctx context.Context, subscriptionID int64, days int) (*UserSubscription, error) { sub, err := s.userSubRepo.GetByID(ctx, subscriptionID) if err != nil { return nil, ErrSubscriptionNotFound } - // 限制延长天数 + // 限制调整天数范围 if days > MaxValidityDays { days = MaxValidityDays } + if days < -MaxValidityDays { + days = -MaxValidityDays + } + + now := time.Now() + isExpired := !sub.ExpiresAt.After(now) + + // 如果订阅已过期,不允许负向调整 + if isExpired && days < 0 { + return nil, infraerrors.BadRequest("CANNOT_SHORTEN_EXPIRED", "cannot shorten an expired subscription") + } // 计算新的过期时间 - newExpiresAt := sub.ExpiresAt.AddDate(0, 0, days) + var newExpiresAt time.Time + if isExpired { + // 已过期:从当前时间开始增加天数 + newExpiresAt = now.AddDate(0, 0, days) + } else { + // 未过期:从原过期时间增加/减少天数 + newExpiresAt = sub.ExpiresAt.AddDate(0, 0, days) + } + if newExpiresAt.After(MaxExpiresAt) { newExpiresAt = MaxExpiresAt } + // 检查新的过期时间必须大于当前时间 + if !newExpiresAt.After(now) { + return nil, ErrAdjustWouldExpire + } + if err := s.userSubRepo.ExtendExpiry(ctx, subscriptionID, newExpiresAt); err != nil { return nil, err } @@ -371,6 +396,7 @@ func (s *SubscriptionService) ListUserSubscriptions(ctx context.Context, userID return nil, err } normalizeExpiredWindows(subs) + normalizeSubscriptionStatus(subs) return subs, nil } @@ -392,17 +418,19 @@ func (s *SubscriptionService) ListGroupSubscriptions(ctx context.Context, groupI return nil, nil, err } normalizeExpiredWindows(subs) + normalizeSubscriptionStatus(subs) return subs, pag, nil } -// List 获取所有订阅(分页,支持筛选) -func (s *SubscriptionService) List(ctx context.Context, page, pageSize int, userID, groupID *int64, status string) ([]UserSubscription, *pagination.PaginationResult, error) { +// List 获取所有订阅(分页,支持筛选和排序) +func (s *SubscriptionService) List(ctx context.Context, page, pageSize int, userID, groupID *int64, status, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error) { params := pagination.PaginationParams{Page: page, PageSize: pageSize} - subs, pag, err := s.userSubRepo.List(ctx, params, userID, groupID, status) + subs, pag, err := s.userSubRepo.List(ctx, params, userID, groupID, status, sortBy, sortOrder) if err != nil { return nil, nil, err } normalizeExpiredWindows(subs) + normalizeSubscriptionStatus(subs) return subs, pag, nil } @@ -429,6 +457,18 @@ func normalizeExpiredWindows(subs []UserSubscription) { } } +// normalizeSubscriptionStatus 根据实际过期时间修正状态(仅影响返回数据,不影响数据库) +// 这确保前端显示正确的状态,即使定时任务尚未更新数据库 +func normalizeSubscriptionStatus(subs []UserSubscription) { + now := time.Now() + for i := range subs { + sub := &subs[i] + if sub.Status == SubscriptionStatusActive && !sub.ExpiresAt.After(now) { + sub.Status = SubscriptionStatusExpired + } + } +} + // startOfDay 返回给定时间所在日期的零点(保持原时区) func startOfDay(t time.Time) time.Time { return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) @@ -647,11 +687,6 @@ func (s *SubscriptionService) GetUserSubscriptionsWithProgress(ctx context.Conte return progresses, nil } -// UpdateExpiredSubscriptions 更新过期订阅状态(定时任务调用) -func (s *SubscriptionService) UpdateExpiredSubscriptions(ctx context.Context) (int64, error) { - return s.userSubRepo.BatchUpdateExpiredStatus(ctx) -} - // 
ValidateSubscription 验证订阅是否有效 func (s *SubscriptionService) ValidateSubscription(ctx context.Context, sub *UserSubscription) error { if sub.Status == SubscriptionStatusExpired { diff --git a/backend/internal/service/token_cache_invalidator.go b/backend/internal/service/token_cache_invalidator.go index 1117d2f1..74c9edc3 100644 --- a/backend/internal/service/token_cache_invalidator.go +++ b/backend/internal/service/token_cache_invalidator.go @@ -1,6 +1,10 @@ package service -import "context" +import ( + "context" + "log/slog" + "strconv" +) type TokenCacheInvalidator interface { InvalidateToken(ctx context.Context, account *Account) error @@ -24,18 +28,87 @@ func (c *CompositeTokenCacheInvalidator) InvalidateToken(ctx context.Context, ac return nil } - var cacheKey string + var keysToDelete []string + accountIDKey := "account:" + strconv.FormatInt(account.ID, 10) + switch account.Platform { case PlatformGemini: - cacheKey = GeminiTokenCacheKey(account) + // Gemini 可能有两种缓存键:project_id 或 account_id + // 首次获取 token 时可能没有 project_id,之后自动检测到 project_id 后会使用新 key + // 刷新时需要同时删除两种可能的 key,确保不会遗留旧缓存 + keysToDelete = append(keysToDelete, GeminiTokenCacheKey(account)) + keysToDelete = append(keysToDelete, "gemini:"+accountIDKey) case PlatformAntigravity: - cacheKey = AntigravityTokenCacheKey(account) + // Antigravity 同样可能有两种缓存键 + keysToDelete = append(keysToDelete, AntigravityTokenCacheKey(account)) + keysToDelete = append(keysToDelete, "ag:"+accountIDKey) case PlatformOpenAI: - cacheKey = OpenAITokenCacheKey(account) + keysToDelete = append(keysToDelete, OpenAITokenCacheKey(account)) case PlatformAnthropic: - cacheKey = ClaudeTokenCacheKey(account) + keysToDelete = append(keysToDelete, ClaudeTokenCacheKey(account)) default: return nil } - return c.cache.DeleteAccessToken(ctx, cacheKey) + + // 删除所有可能的缓存键(去重后) + seen := make(map[string]bool) + for _, key := range keysToDelete { + if seen[key] { + continue + } + seen[key] = true + if err := c.cache.DeleteAccessToken(ctx, key); err != nil { + slog.Warn("token_cache_delete_failed", "key", key, "account_id", account.ID, "error", err) + } + } + + return nil +} + +// CheckTokenVersion 检查 account 的 token 版本是否已过时,并返回最新的 account +// 用于解决异步刷新任务与请求线程的竞态条件: +// 如果刷新任务已更新 token 并删除缓存,此时请求线程的旧 account 对象不应写入缓存 +// +// 返回值: +// - latestAccount: 从 DB 获取的最新 account(如果查询失败则返回 nil) +// - isStale: true 表示 token 已过时(应使用 latestAccount),false 表示可以使用当前 account +func CheckTokenVersion(ctx context.Context, account *Account, repo AccountRepository) (latestAccount *Account, isStale bool) { + if account == nil || repo == nil { + return nil, false + } + + currentVersion := account.GetCredentialAsInt64("_token_version") + + latestAccount, err := repo.GetByID(ctx, account.ID) + if err != nil || latestAccount == nil { + // 查询失败,默认允许缓存,不返回 latestAccount + return nil, false + } + + latestVersion := latestAccount.GetCredentialAsInt64("_token_version") + + // 情况1: 当前 account 没有版本号,但 DB 中已有版本号 + // 说明异步刷新任务已更新 token,当前 account 已过时 + if currentVersion == 0 && latestVersion > 0 { + slog.Debug("token_version_stale_no_current_version", + "account_id", account.ID, + "latest_version", latestVersion) + return latestAccount, true + } + + // 情况2: 两边都没有版本号,说明从未被异步刷新过,允许缓存 + if currentVersion == 0 && latestVersion == 0 { + return latestAccount, false + } + + // 情况3: 比较版本号,如果 DB 中的版本更新,当前 account 已过时 + if latestVersion > currentVersion { + slog.Debug("token_version_stale", + "account_id", account.ID, + "current_version", currentVersion, + "latest_version", latestVersion) + return latestAccount, true + 
} + + return latestAccount, false } diff --git a/backend/internal/service/token_cache_invalidator_test.go b/backend/internal/service/token_cache_invalidator_test.go index 30d208ce..8342cf39 100644 --- a/backend/internal/service/token_cache_invalidator_test.go +++ b/backend/internal/service/token_cache_invalidator_test.go @@ -51,7 +51,27 @@ func TestCompositeTokenCacheInvalidator_Gemini(t *testing.T) { err := invalidator.InvalidateToken(context.Background(), account) require.NoError(t, err) - require.Equal(t, []string{"gemini:project-x"}, cache.deletedKeys) + // 新行为:同时删除基于 project_id 和 account_id 的缓存键 + // 这是为了处理:首次获取 token 时可能没有 project_id,之后自动检测到后会使用新 key + require.Equal(t, []string{"gemini:project-x", "gemini:account:10"}, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_GeminiWithoutProjectID(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 10, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "gemini-token", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + // 没有 project_id 时,两个 key 相同,去重后只删除一个 + require.Equal(t, []string{"gemini:account:10"}, cache.deletedKeys) } func TestCompositeTokenCacheInvalidator_Antigravity(t *testing.T) { @@ -68,7 +88,26 @@ func TestCompositeTokenCacheInvalidator_Antigravity(t *testing.T) { err := invalidator.InvalidateToken(context.Background(), account) require.NoError(t, err) - require.Equal(t, []string{"ag:ag-project"}, cache.deletedKeys) + // 新行为:同时删除基于 project_id 和 account_id 的缓存键 + require.Equal(t, []string{"ag:ag-project", "ag:account:99"}, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_AntigravityWithoutProjectID(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 99, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "ag-token", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + // 没有 project_id 时,两个 key 相同,去重后只删除一个 + require.Equal(t, []string{"ag:account:99"}, cache.deletedKeys) } func TestCompositeTokenCacheInvalidator_OpenAI(t *testing.T) { @@ -233,9 +272,10 @@ func TestCompositeTokenCacheInvalidator_DeleteError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // 新行为:删除失败只记录日志,不返回错误 + // 这是因为缓存失效失败不应影响主业务流程 err := invalidator.InvalidateToken(context.Background(), tt.account) - require.Error(t, err) - require.Equal(t, expectedErr, err) + require.NoError(t, err) }) } } @@ -252,9 +292,12 @@ func TestCompositeTokenCacheInvalidator_AllPlatformsIntegration(t *testing.T) { {ID: 4, Platform: PlatformAnthropic, Type: AccountTypeOAuth}, } + // 新行为:Gemini 和 Antigravity 会同时删除基于 project_id 和 account_id 的键 expectedKeys := []string{ "gemini:gemini-proj", + "gemini:account:1", "ag:ag-proj", + "ag:account:2", "openai:account:3", "claude:account:4", } @@ -266,3 +309,239 @@ func TestCompositeTokenCacheInvalidator_AllPlatformsIntegration(t *testing.T) { require.Equal(t, expectedKeys, cache.deletedKeys) } + +// ========== GetCredentialAsInt64 测试 ========== + +func TestAccount_GetCredentialAsInt64(t *testing.T) { + tests := []struct { + name string + credentials map[string]any + key string + expected int64 + }{ + { + name: "int64_value", + credentials: map[string]any{"_token_version": int64(1737654321000)}, + key: 
"_token_version", + expected: 1737654321000, + }, + { + name: "float64_value", + credentials: map[string]any{"_token_version": float64(1737654321000)}, + key: "_token_version", + expected: 1737654321000, + }, + { + name: "int_value", + credentials: map[string]any{"_token_version": 12345}, + key: "_token_version", + expected: 12345, + }, + { + name: "string_value", + credentials: map[string]any{"_token_version": "1737654321000"}, + key: "_token_version", + expected: 1737654321000, + }, + { + name: "string_with_spaces", + credentials: map[string]any{"_token_version": " 1737654321000 "}, + key: "_token_version", + expected: 1737654321000, + }, + { + name: "nil_credentials", + credentials: nil, + key: "_token_version", + expected: 0, + }, + { + name: "missing_key", + credentials: map[string]any{"other_key": 123}, + key: "_token_version", + expected: 0, + }, + { + name: "nil_value", + credentials: map[string]any{"_token_version": nil}, + key: "_token_version", + expected: 0, + }, + { + name: "invalid_string", + credentials: map[string]any{"_token_version": "not_a_number"}, + key: "_token_version", + expected: 0, + }, + { + name: "empty_string", + credentials: map[string]any{"_token_version": ""}, + key: "_token_version", + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{Credentials: tt.credentials} + result := account.GetCredentialAsInt64(tt.key) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestAccount_GetCredentialAsInt64_NilAccount(t *testing.T) { + var account *Account + result := account.GetCredentialAsInt64("_token_version") + require.Equal(t, int64(0), result) +} + +// ========== CheckTokenVersion 测试 ========== + +func TestCheckTokenVersion(t *testing.T) { + tests := []struct { + name string + account *Account + latestAccount *Account + repoErr error + expectedStale bool + }{ + { + name: "nil_account", + account: nil, + latestAccount: nil, + expectedStale: false, + }, + { + name: "no_version_in_account_but_db_has_version", + account: &Account{ + ID: 1, + Credentials: map[string]any{}, + }, + latestAccount: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + expectedStale: true, // 当前 account 无版本但 DB 有,说明已被异步刷新,当前已过时 + }, + { + name: "both_no_version", + account: &Account{ + ID: 1, + Credentials: map[string]any{}, + }, + latestAccount: &Account{ + ID: 1, + Credentials: map[string]any{}, + }, + expectedStale: false, // 两边都没有版本号,说明从未被异步刷新过,允许缓存 + }, + { + name: "same_version", + account: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + latestAccount: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + expectedStale: false, + }, + { + name: "current_version_newer", + account: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(200)}, + }, + latestAccount: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + expectedStale: false, + }, + { + name: "current_version_older_stale", + account: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + latestAccount: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(200)}, + }, + expectedStale: true, // 当前版本过时 + }, + { + name: "repo_error", + account: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + latestAccount: nil, + repoErr: errors.New("db error"), + expectedStale: false, // 查询失败,默认允许缓存 + }, + { + name: 
"repo_returns_nil", + account: &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + }, + latestAccount: nil, + repoErr: nil, + expectedStale: false, // 查询返回 nil,默认允许缓存 + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 由于 CheckTokenVersion 接受 AccountRepository 接口,而创建完整的 mock 很繁琐 + // 这里我们直接测试函数的核心逻辑来验证行为 + + if tt.name == "nil_account" { + _, isStale := CheckTokenVersion(context.Background(), nil, nil) + require.Equal(t, tt.expectedStale, isStale) + return + } + + // 模拟 CheckTokenVersion 的核心逻辑 + account := tt.account + currentVersion := account.GetCredentialAsInt64("_token_version") + + // 模拟 repo 查询 + latestAccount := tt.latestAccount + if tt.repoErr != nil || latestAccount == nil { + require.Equal(t, tt.expectedStale, false) + return + } + + latestVersion := latestAccount.GetCredentialAsInt64("_token_version") + + // 情况1: 当前 account 没有版本号,但 DB 中已有版本号 + if currentVersion == 0 && latestVersion > 0 { + require.Equal(t, tt.expectedStale, true) + return + } + + // 情况2: 两边都没有版本号 + if currentVersion == 0 && latestVersion == 0 { + require.Equal(t, tt.expectedStale, false) + return + } + + // 情况3: 比较版本号 + isStale := latestVersion > currentVersion + require.Equal(t, tt.expectedStale, isStale) + }) + } +} + +func TestCheckTokenVersion_NilRepo(t *testing.T) { + account := &Account{ + ID: 1, + Credentials: map[string]any{"_token_version": int64(100)}, + } + _, isStale := CheckTokenVersion(context.Background(), account, nil) + require.False(t, isStale) // nil repo,默认允许缓存 +} diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 02e7d445..c33cbf48 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -18,6 +18,7 @@ type TokenRefreshService struct { refreshers []TokenRefresher cfg *config.TokenRefreshConfig cacheInvalidator TokenCacheInvalidator + schedulerCache SchedulerCache // 用于同步更新调度器缓存,解决 token 刷新后缓存不一致问题 stopCh chan struct{} wg sync.WaitGroup @@ -31,12 +32,14 @@ func NewTokenRefreshService( geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, cacheInvalidator TokenCacheInvalidator, + schedulerCache SchedulerCache, cfg *config.Config, ) *TokenRefreshService { s := &TokenRefreshService{ accountRepo: accountRepo, cfg: &cfg.TokenRefresh, cacheInvalidator: cacheInvalidator, + schedulerCache: schedulerCache, stopCh: make(chan struct{}), } @@ -169,6 +172,10 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc // 如果有新凭证,先更新(即使有错误也要保存 token) if newCredentials != nil { + // 记录刷新版本时间戳,用于解决缓存一致性问题 + // TokenProvider 写入缓存前会检查此版本,如果版本已更新则跳过写入 + newCredentials["_token_version"] = time.Now().UnixMilli() + account.Credentials = newCredentials if saveErr := s.accountRepo.Update(ctx, account); saveErr != nil { return fmt.Errorf("failed to save credentials: %w", saveErr) @@ -194,6 +201,15 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc log.Printf("[TokenRefresh] Token cache invalidated for account %d", account.ID) } } + // 同步更新调度器缓存,确保调度获取的 Account 对象包含最新的 credentials + // 这解决了 token 刷新后调度器缓存数据不一致的问题(#445) + if s.schedulerCache != nil { + if err := s.schedulerCache.SetAccount(ctx, account); err != nil { + log.Printf("[TokenRefresh] Failed to sync scheduler cache for account %d: %v", account.ID, err) + } else { + log.Printf("[TokenRefresh] Scheduler cache synced for account %d", account.ID) + } + } return nil } @@ -233,7 
+249,8 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc } // isNonRetryableRefreshError 判断是否为不可重试的刷新错误 -// 这些错误通常表示凭证已失效,需要用户重新授权 +// 这些错误通常表示凭证已失效或配置确实缺失,需要用户重新授权 +// 注意:missing_project_id 错误只在真正缺失(从未获取过)时返回,临时获取失败不会返回此错误 func isNonRetryableRefreshError(err error) bool { if err == nil { return false diff --git a/backend/internal/service/token_refresh_service_test.go b/backend/internal/service/token_refresh_service_test.go index d23a0bb6..8e16c6f5 100644 --- a/backend/internal/service/token_refresh_service_test.go +++ b/backend/internal/service/token_refresh_service_test.go @@ -70,7 +70,7 @@ func TestTokenRefreshService_RefreshWithRetry_InvalidatesCache(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 5, Platform: PlatformGemini, @@ -98,7 +98,7 @@ func TestTokenRefreshService_RefreshWithRetry_InvalidatorErrorIgnored(t *testing RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 6, Platform: PlatformGemini, @@ -124,7 +124,7 @@ func TestTokenRefreshService_RefreshWithRetry_NilInvalidator(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, nil, cfg) account := &Account{ ID: 7, Platform: PlatformGemini, @@ -151,7 +151,7 @@ func TestTokenRefreshService_RefreshWithRetry_Antigravity(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 8, Platform: PlatformAntigravity, @@ -179,7 +179,7 @@ func TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 9, Platform: PlatformGemini, @@ -207,7 +207,7 @@ func TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 10, Platform: PlatformOpenAI, // OpenAI OAuth 账户 @@ -235,7 +235,7 @@ func TestTokenRefreshService_RefreshWithRetry_UpdateFailed(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 11, Platform: PlatformGemini, @@ -264,7 +264,7 @@ func TestTokenRefreshService_RefreshWithRetry_RefreshFailed(t *testing.T) { RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 12, Platform: PlatformGemini, @@ -291,7 +291,7 @@ func TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed(t *testin RetryBackoffSeconds: 0, }, } - service := 
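The consumer side of `_token_version` is not part of this hunk. The intended guard, sketched below, re-checks the version just before a token provider writes a freshly fetched token into the cache; only CheckTokenVersion is real here, the wrapper and its name are illustrative:

func cacheTokenIfFresh(ctx context.Context, account *Account, repo AccountRepository, write func(*Account) error) error {
	latest, stale := CheckTokenVersion(ctx, account, repo)
	if stale && latest != nil {
		// An async refresh already produced a newer token: cache the DB copy
		// rather than clobbering it with the stale one fetched earlier.
		return write(latest)
	}
	return write(account)
}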
NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 13, Platform: PlatformAntigravity, @@ -318,7 +318,7 @@ func TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError(t *te RetryBackoffSeconds: 0, }, } - service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, nil, cfg) account := &Account{ ID: 14, Platform: PlatformAntigravity, diff --git a/backend/internal/service/totp_service.go b/backend/internal/service/totp_service.go new file mode 100644 index 00000000..5192fe3d --- /dev/null +++ b/backend/internal/service/totp_service.go @@ -0,0 +1,506 @@ +package service + +import ( + "context" + "crypto/rand" + "crypto/subtle" + "encoding/hex" + "fmt" + "log/slog" + "time" + + "github.com/pquerna/otp/totp" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ( + ErrTotpNotEnabled = infraerrors.BadRequest("TOTP_NOT_ENABLED", "totp feature is not enabled") + ErrTotpAlreadyEnabled = infraerrors.BadRequest("TOTP_ALREADY_ENABLED", "totp is already enabled for this account") + ErrTotpNotSetup = infraerrors.BadRequest("TOTP_NOT_SETUP", "totp is not set up for this account") + ErrTotpInvalidCode = infraerrors.BadRequest("TOTP_INVALID_CODE", "invalid totp code") + ErrTotpSetupExpired = infraerrors.BadRequest("TOTP_SETUP_EXPIRED", "totp setup session expired") + ErrTotpTooManyAttempts = infraerrors.TooManyRequests("TOTP_TOO_MANY_ATTEMPTS", "too many verification attempts, please try again later") + ErrVerifyCodeRequired = infraerrors.BadRequest("VERIFY_CODE_REQUIRED", "email verification code is required") + ErrPasswordRequired = infraerrors.BadRequest("PASSWORD_REQUIRED", "password is required") +) + +// TotpCache defines cache operations for TOTP service +type TotpCache interface { + // Setup session methods + GetSetupSession(ctx context.Context, userID int64) (*TotpSetupSession, error) + SetSetupSession(ctx context.Context, userID int64, session *TotpSetupSession, ttl time.Duration) error + DeleteSetupSession(ctx context.Context, userID int64) error + + // Login session methods (for 2FA login flow) + GetLoginSession(ctx context.Context, tempToken string) (*TotpLoginSession, error) + SetLoginSession(ctx context.Context, tempToken string, session *TotpLoginSession, ttl time.Duration) error + DeleteLoginSession(ctx context.Context, tempToken string) error + + // Rate limiting + IncrementVerifyAttempts(ctx context.Context, userID int64) (int, error) + GetVerifyAttempts(ctx context.Context, userID int64) (int, error) + ClearVerifyAttempts(ctx context.Context, userID int64) error +} + +// SecretEncryptor defines encryption operations for TOTP secrets +type SecretEncryptor interface { + Encrypt(plaintext string) (string, error) + Decrypt(ciphertext string) (string, error) +} + +// TotpSetupSession represents a TOTP setup session +type TotpSetupSession struct { + Secret string // Plain text TOTP secret (not encrypted yet) + SetupToken string // Random token to verify setup request + CreatedAt time.Time +} + +// TotpLoginSession represents a pending 2FA login session +type TotpLoginSession struct { + UserID int64 + Email string + TokenExpiry time.Time +} + +// TotpStatus represents the TOTP status for a user +type TotpStatus struct { + Enabled bool `json:"enabled"` + EnabledAt *time.Time `json:"enabled_at,omitempty"` + FeatureEnabled bool 
`json:"feature_enabled"` +} + +// TotpSetupResponse represents the response for initiating TOTP setup +type TotpSetupResponse struct { + Secret string `json:"secret"` + QRCodeURL string `json:"qr_code_url"` + SetupToken string `json:"setup_token"` + Countdown int `json:"countdown"` // seconds until setup expires +} + +const ( + totpSetupTTL = 5 * time.Minute + totpLoginTTL = 5 * time.Minute + totpAttemptsTTL = 15 * time.Minute + maxTotpAttempts = 5 + totpIssuer = "Sub2API" +) + +// TotpService handles TOTP operations +type TotpService struct { + userRepo UserRepository + encryptor SecretEncryptor + cache TotpCache + settingService *SettingService + emailService *EmailService + emailQueueService *EmailQueueService +} + +// NewTotpService creates a new TOTP service +func NewTotpService( + userRepo UserRepository, + encryptor SecretEncryptor, + cache TotpCache, + settingService *SettingService, + emailService *EmailService, + emailQueueService *EmailQueueService, +) *TotpService { + return &TotpService{ + userRepo: userRepo, + encryptor: encryptor, + cache: cache, + settingService: settingService, + emailService: emailService, + emailQueueService: emailQueueService, + } +} + +// GetStatus returns the TOTP status for a user +func (s *TotpService) GetStatus(ctx context.Context, userID int64) (*TotpStatus, error) { + featureEnabled := s.settingService.IsTotpEnabled(ctx) + + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + return &TotpStatus{ + Enabled: user.TotpEnabled, + EnabledAt: user.TotpEnabledAt, + FeatureEnabled: featureEnabled, + }, nil +} + +// InitiateSetup starts the TOTP setup process +// If email verification is enabled, emailCode is required; otherwise password is required +func (s *TotpService) InitiateSetup(ctx context.Context, userID int64, emailCode, password string) (*TotpSetupResponse, error) { + // Check if TOTP feature is enabled globally + if !s.settingService.IsTotpEnabled(ctx) { + return nil, ErrTotpNotEnabled + } + + // Get user and check if TOTP is already enabled + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + if user.TotpEnabled { + return nil, ErrTotpAlreadyEnabled + } + + // Verify identity based on email verification setting + if s.settingService.IsEmailVerifyEnabled(ctx) { + // Email verification enabled - verify email code + if emailCode == "" { + return nil, ErrVerifyCodeRequired + } + if err := s.emailService.VerifyCode(ctx, user.Email, emailCode); err != nil { + return nil, err + } + } else { + // Email verification disabled - verify password + if password == "" { + return nil, ErrPasswordRequired + } + if !user.CheckPassword(password) { + return nil, ErrPasswordIncorrect + } + } + + // Generate a new TOTP key + key, err := totp.Generate(totp.GenerateOpts{ + Issuer: totpIssuer, + AccountName: user.Email, + }) + if err != nil { + return nil, fmt.Errorf("generate totp key: %w", err) + } + + // Generate a random setup token + setupToken, err := generateRandomToken(32) + if err != nil { + return nil, fmt.Errorf("generate setup token: %w", err) + } + + // Store the setup session in cache + session := &TotpSetupSession{ + Secret: key.Secret(), + SetupToken: setupToken, + CreatedAt: time.Now(), + } + + if err := s.cache.SetSetupSession(ctx, userID, session, totpSetupTTL); err != nil { + return nil, fmt.Errorf("store setup session: %w", err) + } + + return &TotpSetupResponse{ + Secret: key.Secret(), + QRCodeURL: key.URL(), + 
SetupToken: setupToken, + Countdown: int(totpSetupTTL.Seconds()), + }, nil +} + +// CompleteSetup completes the TOTP setup by verifying the code +func (s *TotpService) CompleteSetup(ctx context.Context, userID int64, totpCode, setupToken string) error { + // Check if TOTP feature is enabled globally + if !s.settingService.IsTotpEnabled(ctx) { + return ErrTotpNotEnabled + } + + // Get the setup session + session, err := s.cache.GetSetupSession(ctx, userID) + if err != nil { + return ErrTotpSetupExpired + } + + if session == nil { + return ErrTotpSetupExpired + } + + // Verify the setup token (constant-time comparison) + if subtle.ConstantTimeCompare([]byte(session.SetupToken), []byte(setupToken)) != 1 { + return ErrTotpSetupExpired + } + + // Verify the TOTP code + if !totp.Validate(totpCode, session.Secret) { + return ErrTotpInvalidCode + } + + setupSecretPrefix := "N/A" + if len(session.Secret) >= 4 { + setupSecretPrefix = session.Secret[:4] + } + slog.Debug("totp_complete_setup_before_encrypt", + "user_id", userID, + "secret_len", len(session.Secret), + "secret_prefix", setupSecretPrefix) + + // Encrypt the secret + encryptedSecret, err := s.encryptor.Encrypt(session.Secret) + if err != nil { + return fmt.Errorf("encrypt totp secret: %w", err) + } + + slog.Debug("totp_complete_setup_encrypted", + "user_id", userID, + "encrypted_len", len(encryptedSecret)) + + // Verify encryption by decrypting + decrypted, decErr := s.encryptor.Decrypt(encryptedSecret) + if decErr != nil { + slog.Debug("totp_complete_setup_verify_failed", + "user_id", userID, + "error", decErr) + } else { + decryptedPrefix := "N/A" + if len(decrypted) >= 4 { + decryptedPrefix = decrypted[:4] + } + slog.Debug("totp_complete_setup_verified", + "user_id", userID, + "original_len", len(session.Secret), + "decrypted_len", len(decrypted), + "match", session.Secret == decrypted, + "decrypted_prefix", decryptedPrefix) + } + + // Update user with encrypted TOTP secret + if err := s.userRepo.UpdateTotpSecret(ctx, userID, &encryptedSecret); err != nil { + return fmt.Errorf("update totp secret: %w", err) + } + + // Enable TOTP for the user + if err := s.userRepo.EnableTotp(ctx, userID); err != nil { + return fmt.Errorf("enable totp: %w", err) + } + + // Clean up the setup session + _ = s.cache.DeleteSetupSession(ctx, userID) + + return nil +} + +// Disable disables TOTP for a user +// If email verification is enabled, emailCode is required; otherwise password is required +func (s *TotpService) Disable(ctx context.Context, userID int64, emailCode, password string) error { + // Get user + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + if !user.TotpEnabled { + return ErrTotpNotSetup + } + + // Verify identity based on email verification setting + if s.settingService.IsEmailVerifyEnabled(ctx) { + // Email verification enabled - verify email code + if emailCode == "" { + return ErrVerifyCodeRequired + } + if err := s.emailService.VerifyCode(ctx, user.Email, emailCode); err != nil { + return err + } + } else { + // Email verification disabled - verify password + if password == "" { + return ErrPasswordRequired + } + if !user.CheckPassword(password) { + return ErrPasswordIncorrect + } + } + + // Disable TOTP + if err := s.userRepo.DisableTotp(ctx, userID); err != nil { + return fmt.Errorf("disable totp: %w", err) + } + + return nil +} + +// VerifyCode verifies a TOTP code for a user +func (s *TotpService) VerifyCode(ctx context.Context, userID int64, code string) error { + 
slog.Debug("totp_verify_code_called", + "user_id", userID, + "code_len", len(code)) + + // Check rate limiting + attempts, err := s.cache.GetVerifyAttempts(ctx, userID) + if err == nil && attempts >= maxTotpAttempts { + return ErrTotpTooManyAttempts + } + + // Get user + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + slog.Debug("totp_verify_get_user_failed", + "user_id", userID, + "error", err) + return infraerrors.InternalServer("TOTP_VERIFY_ERROR", "failed to verify totp code") + } + + if !user.TotpEnabled || user.TotpSecretEncrypted == nil { + slog.Debug("totp_verify_not_setup", + "user_id", userID, + "enabled", user.TotpEnabled, + "has_secret", user.TotpSecretEncrypted != nil) + return ErrTotpNotSetup + } + + slog.Debug("totp_verify_encrypted_secret", + "user_id", userID, + "encrypted_len", len(*user.TotpSecretEncrypted)) + + // Decrypt the secret + secret, err := s.encryptor.Decrypt(*user.TotpSecretEncrypted) + if err != nil { + slog.Debug("totp_verify_decrypt_failed", + "user_id", userID, + "error", err) + return infraerrors.InternalServer("TOTP_VERIFY_ERROR", "failed to verify totp code") + } + + secretPrefix := "N/A" + if len(secret) >= 4 { + secretPrefix = secret[:4] + } + slog.Debug("totp_verify_decrypted", + "user_id", userID, + "secret_len", len(secret), + "secret_prefix", secretPrefix) + + // Verify the code + valid := totp.Validate(code, secret) + slog.Debug("totp_verify_result", + "user_id", userID, + "valid", valid, + "secret_len", len(secret), + "secret_prefix", secretPrefix, + "server_time", time.Now().UTC().Format(time.RFC3339)) + + if !valid { + // Increment failed attempts + _, _ = s.cache.IncrementVerifyAttempts(ctx, userID) + return ErrTotpInvalidCode + } + + // Clear attempt counter on success + _ = s.cache.ClearVerifyAttempts(ctx, userID) + + return nil +} + +// CreateLoginSession creates a temporary login session for 2FA +func (s *TotpService) CreateLoginSession(ctx context.Context, userID int64, email string) (string, error) { + // Generate a random temp token + tempToken, err := generateRandomToken(32) + if err != nil { + return "", fmt.Errorf("generate temp token: %w", err) + } + + session := &TotpLoginSession{ + UserID: userID, + Email: email, + TokenExpiry: time.Now().Add(totpLoginTTL), + } + + if err := s.cache.SetLoginSession(ctx, tempToken, session, totpLoginTTL); err != nil { + return "", fmt.Errorf("store login session: %w", err) + } + + return tempToken, nil +} + +// GetLoginSession retrieves a login session +func (s *TotpService) GetLoginSession(ctx context.Context, tempToken string) (*TotpLoginSession, error) { + return s.cache.GetLoginSession(ctx, tempToken) +} + +// DeleteLoginSession deletes a login session +func (s *TotpService) DeleteLoginSession(ctx context.Context, tempToken string) error { + return s.cache.DeleteLoginSession(ctx, tempToken) +} + +// IsTotpEnabledForUser checks if TOTP is enabled for a specific user +func (s *TotpService) IsTotpEnabledForUser(ctx context.Context, userID int64) (bool, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return false, fmt.Errorf("get user: %w", err) + } + return user.TotpEnabled, nil +} + +// MaskEmail masks an email address for display +func MaskEmail(email string) string { + if len(email) < 3 { + return "***" + } + + atIdx := -1 + for i, c := range email { + if c == '@' { + atIdx = i + break + } + } + + if atIdx == -1 || atIdx < 1 { + return email[:1] + "***" + } + + localPart := email[:atIdx] + domain := email[atIdx:] + + if len(localPart) <= 2 { + 
return localPart[:1] + "***" + domain + } + + return localPart[:1] + "***" + localPart[len(localPart)-1:] + domain +} + +// generateRandomToken generates a random hex-encoded token +func generateRandomToken(byteLength int) (string, error) { + b := make([]byte, byteLength) + if _, err := rand.Read(b); err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} + +// VerificationMethod represents the method required for TOTP operations +type VerificationMethod struct { + Method string `json:"method"` // "email" or "password" +} + +// GetVerificationMethod returns the verification method for TOTP operations +func (s *TotpService) GetVerificationMethod(ctx context.Context) *VerificationMethod { + if s.settingService.IsEmailVerifyEnabled(ctx) { + return &VerificationMethod{Method: "email"} + } + return &VerificationMethod{Method: "password"} +} + +// SendVerifyCode sends an email verification code for TOTP operations +func (s *TotpService) SendVerifyCode(ctx context.Context, userID int64) error { + // Check if email verification is enabled + if !s.settingService.IsEmailVerifyEnabled(ctx) { + return infraerrors.BadRequest("EMAIL_VERIFY_NOT_ENABLED", "email verification is not enabled") + } + + // Get user email + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + // Get site name for email + siteName := s.settingService.GetSiteName(ctx) + + // Send verification code via queue + return s.emailQueueService.EnqueueVerifyCode(user.Email, siteName) +} diff --git a/backend/internal/service/usage_cleanup.go b/backend/internal/service/usage_cleanup.go new file mode 100644 index 00000000..7e3ffbb9 --- /dev/null +++ b/backend/internal/service/usage_cleanup.go @@ -0,0 +1,74 @@ +package service + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + UsageCleanupStatusPending = "pending" + UsageCleanupStatusRunning = "running" + UsageCleanupStatusSucceeded = "succeeded" + UsageCleanupStatusFailed = "failed" + UsageCleanupStatusCanceled = "canceled" +) + +// UsageCleanupFilters 定义清理任务过滤条件 +// 时间范围为必填,其他字段可选 +// JSON 序列化用于存储任务参数 +// +// start_time/end_time 使用 RFC3339 时间格式 +// 以 UTC 或用户时区解析后的时间为准 +// +// 说明: +// - nil 表示未设置该过滤条件 +// - 过滤条件均为精确匹配 +type UsageCleanupFilters struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + Model *string `json:"model,omitempty"` + Stream *bool `json:"stream,omitempty"` + BillingType *int8 `json:"billing_type,omitempty"` +} + +// UsageCleanupTask 表示使用记录清理任务 +// 状态包含 pending/running/succeeded/failed/canceled +type UsageCleanupTask struct { + ID int64 + Status string + Filters UsageCleanupFilters + CreatedBy int64 + DeletedRows int64 + ErrorMsg *string + CanceledBy *int64 + CanceledAt *time.Time + StartedAt *time.Time + FinishedAt *time.Time + CreatedAt time.Time + UpdatedAt time.Time +} + +// UsageCleanupRepository 定义清理任务持久层接口 +type UsageCleanupRepository interface { + CreateTask(ctx context.Context, task *UsageCleanupTask) error + ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) + // ClaimNextPendingTask 抢占下一条可执行任务: + // - 优先 pending + // - 若 running 超过 staleRunningAfterSeconds(可能由于进程退出/崩溃/超时),允许重新抢占继续执行 + ClaimNextPendingTask(ctx context.Context, 
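A concrete filter payload following the json tags above (all values illustrative): scoping deletion to one user and one model for January 2026.

func exampleCleanupFilters() UsageCleanupFilters {
	uid, model := int64(42), "claude-sonnet-4-5"
	filters := UsageCleanupFilters{
		StartTime: time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC),
		EndTime:   time.Date(2026, 2, 1, 0, 0, 0, 0, time.UTC),
		UserID:    &uid,
		Model:     &model,
	}
	// json.Marshal(filters) yields, with omitempty dropping the unset pointers:
	// {"start_time":"2026-01-01T00:00:00Z","end_time":"2026-02-01T00:00:00Z","user_id":42,"model":"claude-sonnet-4-5"}
	return filters
}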
staleRunningAfterSeconds int64) (*UsageCleanupTask, error) + // GetTaskStatus 查询任务状态;若不存在返回 sql.ErrNoRows + GetTaskStatus(ctx context.Context, taskID int64) (string, error) + // UpdateTaskProgress 更新任务进度(deleted_rows)用于断点续跑/展示 + UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error + // CancelTask 将任务标记为 canceled(仅允许 pending/running) + CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) + MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error + MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error + DeleteUsageLogsBatch(ctx context.Context, filters UsageCleanupFilters, limit int) (int64, error) +} diff --git a/backend/internal/service/usage_cleanup_service.go b/backend/internal/service/usage_cleanup_service.go new file mode 100644 index 00000000..37f6d375 --- /dev/null +++ b/backend/internal/service/usage_cleanup_service.go @@ -0,0 +1,404 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "fmt" + "log" + "net/http" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + usageCleanupWorkerName = "usage_cleanup_worker" +) + +// UsageCleanupService 负责创建与执行使用记录清理任务 +type UsageCleanupService struct { + repo UsageCleanupRepository + timingWheel *TimingWheelService + dashboard *DashboardAggregationService + cfg *config.Config + + running int32 + startOnce sync.Once + stopOnce sync.Once + + workerCtx context.Context + workerCancel context.CancelFunc +} + +func NewUsageCleanupService(repo UsageCleanupRepository, timingWheel *TimingWheelService, dashboard *DashboardAggregationService, cfg *config.Config) *UsageCleanupService { + workerCtx, workerCancel := context.WithCancel(context.Background()) + return &UsageCleanupService{ + repo: repo, + timingWheel: timingWheel, + dashboard: dashboard, + cfg: cfg, + workerCtx: workerCtx, + workerCancel: workerCancel, + } +} + +func describeUsageCleanupFilters(filters UsageCleanupFilters) string { + var parts []string + parts = append(parts, "start="+filters.StartTime.UTC().Format(time.RFC3339)) + parts = append(parts, "end="+filters.EndTime.UTC().Format(time.RFC3339)) + if filters.UserID != nil { + parts = append(parts, fmt.Sprintf("user_id=%d", *filters.UserID)) + } + if filters.APIKeyID != nil { + parts = append(parts, fmt.Sprintf("api_key_id=%d", *filters.APIKeyID)) + } + if filters.AccountID != nil { + parts = append(parts, fmt.Sprintf("account_id=%d", *filters.AccountID)) + } + if filters.GroupID != nil { + parts = append(parts, fmt.Sprintf("group_id=%d", *filters.GroupID)) + } + if filters.Model != nil { + parts = append(parts, "model="+strings.TrimSpace(*filters.Model)) + } + if filters.Stream != nil { + parts = append(parts, fmt.Sprintf("stream=%t", *filters.Stream)) + } + if filters.BillingType != nil { + parts = append(parts, fmt.Sprintf("billing_type=%d", *filters.BillingType)) + } + return strings.Join(parts, " ") +} + +func (s *UsageCleanupService) Start() { + if s == nil { + return + } + if s.cfg != nil && !s.cfg.UsageCleanup.Enabled { + log.Printf("[UsageCleanup] not started (disabled)") + return + } + if s.repo == nil || s.timingWheel == nil { + log.Printf("[UsageCleanup] not started (missing deps)") + return + } + + interval := s.workerInterval() + s.startOnce.Do(func() { + s.timingWheel.ScheduleRecurring(usageCleanupWorkerName, interval, 
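For reference, describeUsageCleanupFilters renders only the fields that are set, so the task log lines below carry a compact summary like (values illustrative):

start=2026-01-01T00:00:00Z end=2026-02-01T00:00:00Z user_id=42 model=claude-sonnet-4-5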
s.runOnce) + log.Printf("[UsageCleanup] started (interval=%s max_range_days=%d batch_size=%d task_timeout=%s)", interval, s.maxRangeDays(), s.batchSize(), s.taskTimeout()) + }) +} + +func (s *UsageCleanupService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.workerCancel != nil { + s.workerCancel() + } + if s.timingWheel != nil { + s.timingWheel.Cancel(usageCleanupWorkerName) + } + log.Printf("[UsageCleanup] stopped") + }) +} + +func (s *UsageCleanupService) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) { + if s == nil || s.repo == nil { + return nil, nil, fmt.Errorf("cleanup service not ready") + } + return s.repo.ListTasks(ctx, params) +} + +func (s *UsageCleanupService) CreateTask(ctx context.Context, filters UsageCleanupFilters, createdBy int64) (*UsageCleanupTask, error) { + if s == nil || s.repo == nil { + return nil, fmt.Errorf("cleanup service not ready") + } + if s.cfg != nil && !s.cfg.UsageCleanup.Enabled { + return nil, infraerrors.New(http.StatusServiceUnavailable, "USAGE_CLEANUP_DISABLED", "usage cleanup is disabled") + } + if createdBy <= 0 { + return nil, infraerrors.BadRequest("USAGE_CLEANUP_INVALID_CREATOR", "invalid creator") + } + + log.Printf("[UsageCleanup] create_task requested: operator=%d %s", createdBy, describeUsageCleanupFilters(filters)) + sanitizeUsageCleanupFilters(&filters) + if err := s.validateFilters(filters); err != nil { + log.Printf("[UsageCleanup] create_task rejected: operator=%d err=%v %s", createdBy, err, describeUsageCleanupFilters(filters)) + return nil, err + } + + task := &UsageCleanupTask{ + Status: UsageCleanupStatusPending, + Filters: filters, + CreatedBy: createdBy, + } + if err := s.repo.CreateTask(ctx, task); err != nil { + log.Printf("[UsageCleanup] create_task persist failed: operator=%d err=%v %s", createdBy, err, describeUsageCleanupFilters(filters)) + return nil, fmt.Errorf("create cleanup task: %w", err) + } + log.Printf("[UsageCleanup] create_task persisted: task=%d operator=%d status=%s deleted_rows=%d %s", task.ID, createdBy, task.Status, task.DeletedRows, describeUsageCleanupFilters(filters)) + go s.runOnce() + return task, nil +} + +func (s *UsageCleanupService) runOnce() { + svc := s + if svc == nil { + return + } + if !atomic.CompareAndSwapInt32(&svc.running, 0, 1) { + log.Printf("[UsageCleanup] run_once skipped: already_running=true") + return + } + defer atomic.StoreInt32(&svc.running, 0) + + parent := context.Background() + if svc.workerCtx != nil { + parent = svc.workerCtx + } + ctx, cancel := context.WithTimeout(parent, svc.taskTimeout()) + defer cancel() + + task, err := svc.repo.ClaimNextPendingTask(ctx, int64(svc.taskTimeout().Seconds())) + if err != nil { + log.Printf("[UsageCleanup] claim pending task failed: %v", err) + return + } + if task == nil { + log.Printf("[UsageCleanup] run_once done: no_task=true") + return + } + + log.Printf("[UsageCleanup] task claimed: task=%d status=%s created_by=%d deleted_rows=%d %s", task.ID, task.Status, task.CreatedBy, task.DeletedRows, describeUsageCleanupFilters(task.Filters)) + svc.executeTask(ctx, task) +} + +func (s *UsageCleanupService) executeTask(ctx context.Context, task *UsageCleanupTask) { + if task == nil { + return + } + + batchSize := s.batchSize() + deletedTotal := task.DeletedRows + start := time.Now() + log.Printf("[UsageCleanup] task started: task=%d batch_size=%d deleted_rows=%d %s", task.ID, batchSize, deletedTotal, describeUsageCleanupFilters(task.Filters)) + 
var batchNum int
+
+	// Drain the range in bounded batches: check for cancellation before each
+	// batch, persist progress after each batch, and stop once a batch returns
+	// fewer rows than batch_size (the filtered range is exhausted).
+	for {
+		if ctx != nil && ctx.Err() != nil {
+			log.Printf("[UsageCleanup] task interrupted: task=%d err=%v", task.ID, ctx.Err())
+			return
+		}
+		canceled, err := s.isTaskCanceled(ctx, task.ID)
+		if err != nil {
+			s.markTaskFailed(task.ID, deletedTotal, err)
+			return
+		}
+		if canceled {
+			log.Printf("[UsageCleanup] task canceled: task=%d deleted_rows=%d duration=%s", task.ID, deletedTotal, time.Since(start))
+			return
+		}
+
+		batchNum++
+		deleted, err := s.repo.DeleteUsageLogsBatch(ctx, task.Filters, batchSize)
+		if err != nil {
+			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+				// Task interrupted (e.g. service shutdown or timeout): keep it in
+				// running status so the stale-task reclaim can resume it later.
+				log.Printf("[UsageCleanup] task interrupted: task=%d err=%v", task.ID, err)
+				return
+			}
+			s.markTaskFailed(task.ID, deletedTotal, err)
+			return
+		}
+		deletedTotal += deleted
+		if deleted > 0 {
+			updateCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+			if err := s.repo.UpdateTaskProgress(updateCtx, task.ID, deletedTotal); err != nil {
+				log.Printf("[UsageCleanup] task progress update failed: task=%d deleted_rows=%d err=%v", task.ID, deletedTotal, err)
+			}
+			cancel()
+		}
+		if batchNum <= 3 || batchNum%20 == 0 || deleted < int64(batchSize) {
+			log.Printf("[UsageCleanup] task batch done: task=%d batch=%d deleted=%d deleted_total=%d", task.ID, batchNum, deleted, deletedTotal)
+		}
+		if deleted == 0 || deleted < int64(batchSize) {
+			break
+		}
+	}
+
+	updateCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if err := s.repo.MarkTaskSucceeded(updateCtx, task.ID, deletedTotal); err != nil {
+		log.Printf("[UsageCleanup] failed to mark task succeeded: task=%d err=%v", task.ID, err)
+	} else {
+		log.Printf("[UsageCleanup] task succeeded: task=%d deleted_rows=%d duration=%s", task.ID, deletedTotal, time.Since(start))
+	}
+
+	if s.dashboard != nil {
+		if err := s.dashboard.TriggerRecomputeRange(task.Filters.StartTime, task.Filters.EndTime); err != nil {
+			log.Printf("[UsageCleanup] trigger dashboard recompute failed: task=%d err=%v", task.ID, err)
+		} else {
+			log.Printf("[UsageCleanup] trigger dashboard recompute: task=%d start=%s end=%s", task.ID, task.Filters.StartTime.UTC().Format(time.RFC3339), task.Filters.EndTime.UTC().Format(time.RFC3339))
+		}
+	}
+}
+
+func (s *UsageCleanupService) markTaskFailed(taskID int64, deletedRows int64, err error) {
+	// Truncate the error message so it fits within the task record's 500-char limit.
+	msg := strings.TrimSpace(err.Error())
+	if len(msg) > 500 {
+		msg = msg[:500]
+	}
+	log.Printf("[UsageCleanup] task failed: task=%d deleted_rows=%d err=%s", taskID, deletedRows, msg)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if updateErr := s.repo.MarkTaskFailed(ctx, taskID, deletedRows, msg); updateErr != nil {
+		log.Printf("[UsageCleanup] failed to mark task failed: task=%d err=%v", taskID, updateErr)
+	}
+}
+
+func (s *UsageCleanupService) isTaskCanceled(ctx context.Context, taskID int64) (bool, error) {
+	if s == nil || s.repo == nil {
+		return false, fmt.Errorf("cleanup service not ready")
+	}
+	// Use a short background-context timeout so the cancel check still works
+	// even when the task's own context is close to expiring.
+	checkCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	status, err := s.repo.GetTaskStatus(checkCtx, taskID)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return false, nil
+		}
+		return false, err
+	}
+	if status == UsageCleanupStatusCanceled {
+		log.Printf("[UsageCleanup] task cancel detected: task=%d", taskID)
+	}
+	return status == UsageCleanupStatusCanceled, nil
+}
+
+func (s *UsageCleanupService)
validateFilters(filters UsageCleanupFilters) error {
+	if filters.StartTime.IsZero() || filters.EndTime.IsZero() {
+		return infraerrors.BadRequest("USAGE_CLEANUP_MISSING_RANGE", "start_date and end_date are required")
+	}
+	if filters.EndTime.Before(filters.StartTime) {
+		return infraerrors.BadRequest("USAGE_CLEANUP_INVALID_RANGE", "end_date must be after start_date")
+	}
+	maxDays := s.maxRangeDays()
+	if maxDays > 0 {
+		delta := filters.EndTime.Sub(filters.StartTime)
+		if delta > time.Duration(maxDays)*24*time.Hour {
+			return infraerrors.BadRequest("USAGE_CLEANUP_RANGE_TOO_LARGE", fmt.Sprintf("date range exceeds %d days", maxDays))
+		}
+	}
+	return nil
+}
+
+func (s *UsageCleanupService) CancelTask(ctx context.Context, taskID int64, canceledBy int64) error {
+	if s == nil || s.repo == nil {
+		return fmt.Errorf("cleanup service not ready")
+	}
+	if s.cfg != nil && !s.cfg.UsageCleanup.Enabled {
+		return infraerrors.New(http.StatusServiceUnavailable, "USAGE_CLEANUP_DISABLED", "usage cleanup is disabled")
+	}
+	if canceledBy <= 0 {
+		return infraerrors.BadRequest("USAGE_CLEANUP_INVALID_CANCELLER", "invalid canceller")
+	}
+	status, err := s.repo.GetTaskStatus(ctx, taskID)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return infraerrors.New(http.StatusNotFound, "USAGE_CLEANUP_TASK_NOT_FOUND", "cleanup task not found")
+		}
+		return err
+	}
+	log.Printf("[UsageCleanup] cancel_task requested: task=%d operator=%d status=%s", taskID, canceledBy, status)
+	if status != UsageCleanupStatusPending && status != UsageCleanupStatusRunning {
+		return infraerrors.New(http.StatusConflict, "USAGE_CLEANUP_CANCEL_CONFLICT", "cleanup task cannot be canceled in current status")
+	}
+	ok, err := s.repo.CancelTask(ctx, taskID, canceledBy)
+	if err != nil {
+		return err
+	}
+	if !ok {
+		// Status may have changed concurrently.
+		return infraerrors.New(http.StatusConflict, "USAGE_CLEANUP_CANCEL_CONFLICT", "cleanup task cannot be canceled in current status")
+	}
+	log.Printf("[UsageCleanup] cancel_task done: task=%d operator=%d", taskID, canceledBy)
+	return nil
+}
+
+func sanitizeUsageCleanupFilters(filters *UsageCleanupFilters) {
+	if filters == nil {
+		return
+	}
+	if filters.UserID != nil && *filters.UserID <= 0 {
+		filters.UserID = nil
+	}
+	if filters.APIKeyID != nil && *filters.APIKeyID <= 0 {
+		filters.APIKeyID = nil
+	}
+	if filters.AccountID != nil && *filters.AccountID <= 0 {
+		filters.AccountID = nil
+	}
+	if filters.GroupID != nil && *filters.GroupID <= 0 {
+		filters.GroupID = nil
+	}
+	if filters.Model != nil {
+		model := strings.TrimSpace(*filters.Model)
+		if model == "" {
+			filters.Model = nil
+		} else {
+			filters.Model = &model
+		}
+	}
+	if filters.BillingType != nil && *filters.BillingType < 0 {
+		filters.BillingType = nil
+	}
+}
+
+func (s *UsageCleanupService) maxRangeDays() int {
+	if s == nil || s.cfg == nil {
+		return 31
+	}
+	if s.cfg.UsageCleanup.MaxRangeDays > 0 {
+		return s.cfg.UsageCleanup.MaxRangeDays
+	}
+	return 31
+}
+
+func (s *UsageCleanupService) batchSize() int {
+	if s == nil || s.cfg == nil {
+		return 5000
+	}
+	if s.cfg.UsageCleanup.BatchSize > 0 {
+		return s.cfg.UsageCleanup.BatchSize
+	}
+	return 5000
+}
+
+func (s *UsageCleanupService) workerInterval() time.Duration {
+	if s == nil || s.cfg == nil {
+		return 10 * time.Second
+	}
+	if s.cfg.UsageCleanup.WorkerIntervalSeconds > 0 {
+		return time.Duration(s.cfg.UsageCleanup.WorkerIntervalSeconds) * time.Second
+	}
+	return 10 * time.Second
+}
+
+func (s *UsageCleanupService) taskTimeout() time.Duration {
+	if s == nil || s.cfg == nil {
+		return
30 * time.Minute + } + if s.cfg.UsageCleanup.TaskTimeoutSeconds > 0 { + return time.Duration(s.cfg.UsageCleanup.TaskTimeoutSeconds) * time.Second + } + return 30 * time.Minute +} diff --git a/backend/internal/service/usage_cleanup_service_test.go b/backend/internal/service/usage_cleanup_service_test.go new file mode 100644 index 00000000..c6c309b6 --- /dev/null +++ b/backend/internal/service/usage_cleanup_service_test.go @@ -0,0 +1,818 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +type cleanupDeleteResponse struct { + deleted int64 + err error +} + +type cleanupDeleteCall struct { + filters UsageCleanupFilters + limit int +} + +type cleanupMarkCall struct { + taskID int64 + deletedRows int64 + errMsg string +} + +type cleanupRepoStub struct { + mu sync.Mutex + created []*UsageCleanupTask + createErr error + listTasks []UsageCleanupTask + listResult *pagination.PaginationResult + listErr error + claimQueue []*UsageCleanupTask + claimErr error + deleteQueue []cleanupDeleteResponse + deleteCalls []cleanupDeleteCall + markSucceeded []cleanupMarkCall + markFailed []cleanupMarkCall + statusByID map[int64]string + statusErr error + progressCalls []cleanupMarkCall + updateErr error + cancelCalls []int64 + cancelErr error + cancelResult *bool + markFailedErr error +} + +type dashboardRepoStub struct { + recomputeErr error +} + +func (s *dashboardRepoStub) AggregateRange(ctx context.Context, start, end time.Time) error { + return nil +} + +func (s *dashboardRepoStub) RecomputeRange(ctx context.Context, start, end time.Time) error { + return s.recomputeErr +} + +func (s *dashboardRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + return time.Time{}, nil +} + +func (s *dashboardRepoStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + return nil +} + +func (s *dashboardRepoStub) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + return nil +} + +func (s *dashboardRepoStub) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + return nil +} + +func (s *dashboardRepoStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + return nil +} + +func (s *cleanupRepoStub) CreateTask(ctx context.Context, task *UsageCleanupTask) error { + if task == nil { + return nil + } + s.mu.Lock() + defer s.mu.Unlock() + if s.createErr != nil { + return s.createErr + } + if task.ID == 0 { + task.ID = int64(len(s.created) + 1) + } + if task.CreatedAt.IsZero() { + task.CreatedAt = time.Now().UTC() + } + if task.UpdatedAt.IsZero() { + task.UpdatedAt = task.CreatedAt + } + clone := *task + s.created = append(s.created, &clone) + return nil +} + +func (s *cleanupRepoStub) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]UsageCleanupTask, *pagination.PaginationResult, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.listTasks, s.listResult, s.listErr +} + +func (s *cleanupRepoStub) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*UsageCleanupTask, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.claimErr != nil { + return nil, s.claimErr + } + if len(s.claimQueue) == 0 { + return nil, nil + } + task := s.claimQueue[0] + s.claimQueue = s.claimQueue[1:] + if 
s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[task.ID] = UsageCleanupStatusRunning + return task, nil +} + +func (s *cleanupRepoStub) GetTaskStatus(ctx context.Context, taskID int64) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.statusErr != nil { + return "", s.statusErr + } + if s.statusByID == nil { + return "", sql.ErrNoRows + } + status, ok := s.statusByID[taskID] + if !ok { + return "", sql.ErrNoRows + } + return status, nil +} + +func (s *cleanupRepoStub) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error { + s.mu.Lock() + defer s.mu.Unlock() + s.progressCalls = append(s.progressCalls, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows}) + if s.updateErr != nil { + return s.updateErr + } + return nil +} + +func (s *cleanupRepoStub) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.cancelCalls = append(s.cancelCalls, taskID) + if s.cancelErr != nil { + return false, s.cancelErr + } + if s.cancelResult != nil { + ok := *s.cancelResult + if ok { + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusCanceled + } + return ok, nil + } + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + status := s.statusByID[taskID] + if status != UsageCleanupStatusPending && status != UsageCleanupStatusRunning { + return false, nil + } + s.statusByID[taskID] = UsageCleanupStatusCanceled + return true, nil +} + +func (s *cleanupRepoStub) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error { + s.mu.Lock() + defer s.mu.Unlock() + s.markSucceeded = append(s.markSucceeded, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows}) + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusSucceeded + return nil +} + +func (s *cleanupRepoStub) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error { + s.mu.Lock() + defer s.mu.Unlock() + s.markFailed = append(s.markFailed, cleanupMarkCall{taskID: taskID, deletedRows: deletedRows, errMsg: errorMsg}) + if s.statusByID == nil { + s.statusByID = map[int64]string{} + } + s.statusByID[taskID] = UsageCleanupStatusFailed + if s.markFailedErr != nil { + return s.markFailedErr + } + return nil +} + +func (s *cleanupRepoStub) DeleteUsageLogsBatch(ctx context.Context, filters UsageCleanupFilters, limit int) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.deleteCalls = append(s.deleteCalls, cleanupDeleteCall{filters: filters, limit: limit}) + if len(s.deleteQueue) == 0 { + return 0, nil + } + resp := s.deleteQueue[0] + s.deleteQueue = s.deleteQueue[1:] + return resp.deleted, resp.err +} + +func TestUsageCleanupServiceCreateTaskSanitizeFilters(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + userID := int64(-1) + apiKeyID := int64(10) + model := " gpt-4 " + billingType := int8(-2) + filters := UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + APIKeyID: &apiKeyID, + Model: &model, + BillingType: &billingType, + } + + task, err := svc.CreateTask(context.Background(), filters, 9) + require.NoError(t, err) + require.Equal(t, UsageCleanupStatusPending, task.Status) + 
require.Nil(t, task.Filters.UserID) + require.NotNil(t, task.Filters.APIKeyID) + require.Equal(t, apiKeyID, *task.Filters.APIKeyID) + require.NotNil(t, task.Filters.Model) + require.Equal(t, "gpt-4", *task.Filters.Model) + require.Nil(t, task.Filters.BillingType) + require.Equal(t, int64(9), task.CreatedBy) +} + +func TestUsageCleanupServiceCreateTaskInvalidCreator(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 0) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_INVALID_CREATOR", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskDisabled(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Equal(t, http.StatusServiceUnavailable, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_DISABLED", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskRangeTooLarge(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 1}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(48 * time.Hour) + filters := UsageCleanupFilters{StartTime: start, EndTime: end} + + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_RANGE_TOO_LARGE", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskMissingRange(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + _, err := svc.CreateTask(context.Background(), UsageCleanupFilters{}, 1) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_MISSING_RANGE", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCreateTaskRepoError(t *testing.T) { + repo := &cleanupRepoStub{createErr: errors.New("db down")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + filters := UsageCleanupFilters{ + StartTime: time.Now(), + EndTime: time.Now().Add(24 * time.Hour), + } + _, err := svc.CreateTask(context.Background(), filters, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "create cleanup task") +} + +func TestUsageCleanupServiceRunOnceSuccess(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) + repo := &cleanupRepoStub{ + claimQueue: []*UsageCleanupTask{ + {ID: 5, Filters: UsageCleanupFilters{StartTime: start, EndTime: end}}, + }, + deleteQueue: []cleanupDeleteResponse{ + {deleted: 2}, + {deleted: 2}, + {deleted: 1}, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2, TaskTimeoutSeconds: 30}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + svc.runOnce() + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.deleteCalls, 3) + 
require.Equal(t, 2, repo.deleteCalls[0].limit)
+	require.True(t, repo.deleteCalls[0].filters.StartTime.Equal(start))
+	require.True(t, repo.deleteCalls[0].filters.EndTime.Equal(end))
+	require.Len(t, repo.markSucceeded, 1)
+	require.Empty(t, repo.markFailed)
+	require.Equal(t, int64(5), repo.markSucceeded[0].taskID)
+	require.Equal(t, int64(5), repo.markSucceeded[0].deletedRows)
+}
+
+func TestUsageCleanupServiceRunOnceClaimError(t *testing.T) {
+	repo := &cleanupRepoStub{claimErr: errors.New("claim failed")}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}
+	svc := NewUsageCleanupService(repo, nil, nil, cfg)
+	svc.runOnce()
+
+	repo.mu.Lock()
+	defer repo.mu.Unlock()
+	require.Empty(t, repo.markSucceeded)
+	require.Empty(t, repo.markFailed)
+}
+
+func TestUsageCleanupServiceRunOnceAlreadyRunning(t *testing.T) {
+	repo := &cleanupRepoStub{}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}
+	svc := NewUsageCleanupService(repo, nil, nil, cfg)
+	svc.running = 1
+	svc.runOnce()
+}
+
+func TestUsageCleanupServiceExecuteTaskFailed(t *testing.T) {
+	longMsg := strings.Repeat("x", 600)
+	repo := &cleanupRepoStub{
+		deleteQueue: []cleanupDeleteResponse{
+			{err: errors.New(longMsg)},
+		},
+	}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 3}}
+	svc := NewUsageCleanupService(repo, nil, nil, cfg)
+	task := &UsageCleanupTask{
+		ID: 11,
+		Filters: UsageCleanupFilters{
+			StartTime: time.Now(),
+			EndTime: time.Now().Add(24 * time.Hour),
+		},
+	}
+
+	svc.executeTask(context.Background(), task)
+
+	repo.mu.Lock()
+	defer repo.mu.Unlock()
+	require.Len(t, repo.markFailed, 1)
+	require.Equal(t, int64(11), repo.markFailed[0].taskID)
+	require.Equal(t, 500, len(repo.markFailed[0].errMsg))
+}
+
+func TestUsageCleanupServiceExecuteTaskProgressError(t *testing.T) {
+	repo := &cleanupRepoStub{
+		deleteQueue: []cleanupDeleteResponse{
+			{deleted: 2},
+			{deleted: 0},
+		},
+		updateErr: errors.New("update failed"),
+	}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}}
+	svc := NewUsageCleanupService(repo, nil, nil, cfg)
+	task := &UsageCleanupTask{
+		ID: 8,
+		Filters: UsageCleanupFilters{
+			StartTime: time.Now().UTC(),
+			EndTime: time.Now().UTC().Add(time.Hour),
+		},
+	}
+
+	svc.executeTask(context.Background(), task)
+
+	repo.mu.Lock()
+	defer repo.mu.Unlock()
+	require.Len(t, repo.markSucceeded, 1)
+	require.Empty(t, repo.markFailed)
+	require.Len(t, repo.progressCalls, 1)
+}
+
+func TestUsageCleanupServiceExecuteTaskDeleteCanceled(t *testing.T) {
+	repo := &cleanupRepoStub{
+		deleteQueue: []cleanupDeleteResponse{
+			{err: context.Canceled},
+		},
+	}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}}
+	svc := NewUsageCleanupService(repo, nil, nil, cfg)
+	task := &UsageCleanupTask{
+		ID: 12,
+		Filters: UsageCleanupFilters{
+			StartTime: time.Now().UTC(),
+			EndTime: time.Now().UTC().Add(time.Hour),
+		},
+	}
+
+	svc.executeTask(context.Background(), task)
+
+	repo.mu.Lock()
+	defer repo.mu.Unlock()
+	require.Empty(t, repo.markSucceeded)
+	require.Empty(t, repo.markFailed)
+}
+
+func TestUsageCleanupServiceExecuteTaskContextCanceled(t *testing.T) {
+	repo := &cleanupRepoStub{}
+	cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true,
BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 9, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + svc.executeTask(ctx, task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) + require.Empty(t, repo.deleteCalls) +} + +func TestUsageCleanupServiceExecuteTaskMarkFailedUpdateError(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {err: errors.New("boom")}, + }, + markFailedErr: errors.New("update failed"), + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 13, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markFailed, 1) + require.Equal(t, int64(13), repo.markFailed[0].taskID) +} + +func TestUsageCleanupServiceExecuteTaskDashboardRecomputeError(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {deleted: 0}, + }, + } + dashboard := NewDashboardAggregationService(&dashboardRepoStub{}, nil, &config.Config{ + DashboardAgg: config.DashboardAggregationConfig{Enabled: false}, + }) + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, dashboard, cfg) + task := &UsageCleanupTask{ + ID: 14, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markSucceeded, 1) +} + +func TestUsageCleanupServiceExecuteTaskDashboardRecomputeSuccess(t *testing.T) { + repo := &cleanupRepoStub{ + deleteQueue: []cleanupDeleteResponse{ + {deleted: 0}, + }, + } + dashboard := NewDashboardAggregationService(&dashboardRepoStub{}, nil, &config.Config{ + DashboardAgg: config.DashboardAggregationConfig{Enabled: true}, + }) + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, dashboard, cfg) + task := &UsageCleanupTask{ + ID: 15, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.markSucceeded, 1) +} + +func TestUsageCleanupServiceExecuteTaskCanceled(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 3: UsageCleanupStatusCanceled, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, BatchSize: 2}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + task := &UsageCleanupTask{ + ID: 3, + Filters: UsageCleanupFilters{ + StartTime: time.Now().UTC(), + EndTime: time.Now().UTC().Add(time.Hour), + }, + } + + svc.executeTask(context.Background(), task) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Empty(t, repo.deleteCalls) + require.Empty(t, repo.markSucceeded) + require.Empty(t, repo.markFailed) +} + +func TestUsageCleanupServiceCancelTaskSuccess(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: 
map[int64]string{ + 5: UsageCleanupStatusPending, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 5, 9) + require.NoError(t, err) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Equal(t, UsageCleanupStatusCanceled, repo.statusByID[5]) + require.Len(t, repo.cancelCalls, 1) +} + +func TestUsageCleanupServiceCancelTaskDisabled(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 1, 2) + require.Error(t, err) + require.Equal(t, http.StatusServiceUnavailable, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_DISABLED", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 999, 1) + require.Error(t, err) + require.Equal(t, http.StatusNotFound, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_TASK_NOT_FOUND", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskStatusError(t *testing.T) { + repo := &cleanupRepoStub{statusErr: errors.New("status broken")} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "status broken") +} + +func TestUsageCleanupServiceCancelTaskConflict(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusSucceeded, + }, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Equal(t, http.StatusConflict, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_CANCEL_CONFLICT", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskRepoConflict(t *testing.T) { + shouldCancel := false + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusPending, + }, + cancelResult: &shouldCancel, + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Equal(t, http.StatusConflict, infraerrors.Code(err)) + require.Equal(t, "USAGE_CLEANUP_CANCEL_CONFLICT", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceCancelTaskRepoError(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusPending, + }, + cancelErr: errors.New("cancel failed"), + } + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "cancel failed") +} + +func TestUsageCleanupServiceCancelTaskInvalidCanceller(t *testing.T) { + repo := &cleanupRepoStub{ + statusByID: map[int64]string{ + 7: UsageCleanupStatusRunning, + }, + } + cfg := &config.Config{UsageCleanup: 
config.UsageCleanupConfig{Enabled: true}} + svc := NewUsageCleanupService(repo, nil, nil, cfg) + + err := svc.CancelTask(context.Background(), 7, 0) + require.Error(t, err) + require.Equal(t, "USAGE_CLEANUP_INVALID_CANCELLER", infraerrors.Reason(err)) +} + +func TestUsageCleanupServiceListTasks(t *testing.T) { + repo := &cleanupRepoStub{ + listTasks: []UsageCleanupTask{{ID: 1}, {ID: 2}}, + listResult: &pagination.PaginationResult{ + Total: 2, + Page: 1, + PageSize: 20, + Pages: 1, + }, + } + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + tasks, result, err := svc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.NoError(t, err) + require.Len(t, tasks, 2) + require.Equal(t, int64(2), result.Total) +} + +func TestUsageCleanupServiceListTasksNotReady(t *testing.T) { + var nilSvc *UsageCleanupService + _, _, err := nilSvc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) + + svc := NewUsageCleanupService(nil, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + _, _, err = svc.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}) + require.Error(t, err) +} + +func TestUsageCleanupServiceDefaultsAndLifecycle(t *testing.T) { + var nilSvc *UsageCleanupService + require.Equal(t, 31, nilSvc.maxRangeDays()) + require.Equal(t, 5000, nilSvc.batchSize()) + require.Equal(t, 10*time.Second, nilSvc.workerInterval()) + require.Equal(t, 30*time.Minute, nilSvc.taskTimeout()) + nilSvc.Start() + nilSvc.Stop() + + repo := &cleanupRepoStub{} + cfgDisabled := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: false}} + svcDisabled := NewUsageCleanupService(repo, nil, nil, cfgDisabled) + svcDisabled.Start() + svcDisabled.Stop() + + timingWheel, err := NewTimingWheelService() + require.NoError(t, err) + + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, WorkerIntervalSeconds: 5}} + svc := NewUsageCleanupService(repo, timingWheel, nil, cfg) + require.Equal(t, 5*time.Second, svc.workerInterval()) + svc.Start() + svc.Stop() + + cfgFallback := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} + svcFallback := NewUsageCleanupService(repo, timingWheel, nil, cfgFallback) + require.Equal(t, 31, svcFallback.maxRangeDays()) + require.Equal(t, 5000, svcFallback.batchSize()) + require.Equal(t, 10*time.Second, svcFallback.workerInterval()) + + svcMissingDeps := NewUsageCleanupService(nil, nil, nil, cfgFallback) + svcMissingDeps.Start() +} + +func TestSanitizeUsageCleanupFiltersModelEmpty(t *testing.T) { + model := " " + apiKeyID := int64(-5) + accountID := int64(-1) + groupID := int64(-2) + filters := UsageCleanupFilters{ + UserID: &apiKeyID, + APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + } + + sanitizeUsageCleanupFilters(&filters) + require.Nil(t, filters.UserID) + require.Nil(t, filters.APIKeyID) + require.Nil(t, filters.AccountID) + require.Nil(t, filters.GroupID) + require.Nil(t, filters.Model) +} + +func TestDescribeUsageCleanupFiltersAllFields(t *testing.T) { + start := time.Date(2024, 2, 1, 10, 0, 0, 0, time.UTC) + end := start.Add(2 * time.Hour) + userID := int64(1) + apiKeyID := int64(2) + accountID := int64(3) + groupID := int64(4) + model := " gpt-4 " + stream := true + billingType := int8(2) + filters := UsageCleanupFilters{ + StartTime: start, + EndTime: end, + UserID: &userID, + 
APIKeyID: &apiKeyID, + AccountID: &accountID, + GroupID: &groupID, + Model: &model, + Stream: &stream, + BillingType: &billingType, + } + + desc := describeUsageCleanupFilters(filters) + require.Equal(t, "start=2024-02-01T10:00:00Z end=2024-02-01T12:00:00Z user_id=1 api_key_id=2 account_id=3 group_id=4 model=gpt-4 stream=true billing_type=2", desc) +} + +func TestUsageCleanupServiceIsTaskCanceledNotFound(t *testing.T) { + repo := &cleanupRepoStub{} + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + canceled, err := svc.isTaskCanceled(context.Background(), 9) + require.NoError(t, err) + require.False(t, canceled) +} + +func TestUsageCleanupServiceIsTaskCanceledError(t *testing.T) { + repo := &cleanupRepoStub{statusErr: errors.New("status err")} + svc := NewUsageCleanupService(repo, nil, nil, &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}}) + + _, err := svc.isTaskCanceled(context.Background(), 9) + require.Error(t, err) + require.Contains(t, err.Error(), "status err") +} diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go index 1734914a..99bf7fd0 100644 --- a/backend/internal/service/user_service.go +++ b/backend/internal/service/user_service.go @@ -38,6 +38,11 @@ type UserRepository interface { UpdateConcurrency(ctx context.Context, id int64, amount int) error ExistsByEmail(ctx context.Context, email string) (bool, error) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) + + // TOTP 相关方法 + UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error + EnableTotp(ctx context.Context, userID int64) error + DisableTotp(ctx context.Context, userID int64) error } // UpdateProfileRequest 更新用户资料请求 diff --git a/backend/internal/service/user_subscription_port.go b/backend/internal/service/user_subscription_port.go index abf4dffd..2dfc8d02 100644 --- a/backend/internal/service/user_subscription_port.go +++ b/backend/internal/service/user_subscription_port.go @@ -18,7 +18,7 @@ type UserSubscriptionRepository interface { ListByUserID(ctx context.Context, userID int64) ([]UserSubscription, error) ListActiveByUserID(ctx context.Context, userID int64) ([]UserSubscription, error) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]UserSubscription, *pagination.PaginationResult, error) - List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]UserSubscription, *pagination.PaginationResult, error) + List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]UserSubscription, *pagination.PaginationResult, error) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index acc0a5fb..4b721bb6 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -1,6 +1,7 @@ package service import ( + "context" "database/sql" "time" @@ -43,9 +44,10 @@ func ProvideTokenRefreshService( geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, cacheInvalidator TokenCacheInvalidator, + schedulerCache SchedulerCache, cfg *config.Config, ) *TokenRefreshService { - svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, 
geminiOAuthService, antigravityOAuthService, cacheInvalidator, cfg) + svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, schedulerCache, cfg) svc.Start() return svc } @@ -57,6 +59,13 @@ func ProvideDashboardAggregationService(repo DashboardAggregationRepository, tim return svc } +// ProvideUsageCleanupService 创建并启动使用记录清理任务服务 +func ProvideUsageCleanupService(repo UsageCleanupRepository, timingWheel *TimingWheelService, dashboardAgg *DashboardAggregationService, cfg *config.Config) *UsageCleanupService { + svc := NewUsageCleanupService(repo, timingWheel, dashboardAgg, cfg) + svc.Start() + return svc +} + // ProvideAccountExpiryService creates and starts AccountExpiryService. func ProvideAccountExpiryService(accountRepo AccountRepository) *AccountExpiryService { svc := NewAccountExpiryService(accountRepo, time.Minute) @@ -64,6 +73,13 @@ func ProvideAccountExpiryService(accountRepo AccountRepository) *AccountExpirySe return svc } +// ProvideSubscriptionExpiryService creates and starts SubscriptionExpiryService. +func ProvideSubscriptionExpiryService(userSubRepo UserSubscriptionRepository) *SubscriptionExpiryService { + svc := NewSubscriptionExpiryService(userSubRepo, time.Minute) + svc.Start() + return svc +} + // ProvideTimingWheelService creates and starts TimingWheelService func ProvideTimingWheelService() (*TimingWheelService, error) { svc, err := NewTimingWheelService() @@ -189,6 +205,8 @@ func ProvideOpsScheduledReportService( // ProvideAPIKeyAuthCacheInvalidator 提供 API Key 认证缓存失效能力 func ProvideAPIKeyAuthCacheInvalidator(apiKeyService *APIKeyService) APIKeyAuthCacheInvalidator { + // Start Pub/Sub subscriber for L1 cache invalidation across instances + apiKeyService.StartAuthCacheInvalidationSubscriber(context.Background()) return apiKeyService } @@ -209,6 +227,7 @@ var ProviderSet = wire.NewSet( ProvidePricingService, NewBillingService, NewBillingCacheService, + NewAnnouncementService, NewAdminService, NewGatewayService, NewOpenAIGatewayService, @@ -246,10 +265,13 @@ var ProviderSet = wire.NewSet( ProvideUpdateService, ProvideTokenRefreshService, ProvideAccountExpiryService, + ProvideSubscriptionExpiryService, ProvideTimingWheelService, ProvideDashboardAggregationService, + ProvideUsageCleanupService, ProvideDeferredService, NewAntigravityQuotaFetcher, NewUserAttributeService, NewUsageCache, + NewTotpService, ) diff --git a/backend/internal/setup/cli.go b/backend/internal/setup/cli.go index 03ac3f66..2b323acf 100644 --- a/backend/internal/setup/cli.go +++ b/backend/internal/setup/cli.go @@ -149,6 +149,8 @@ func RunCLI() error { fmt.Println(" Invalid Redis DB. Must be between 0 and 15.") } + cfg.Redis.EnableTLS = promptConfirm(reader, "Enable Redis TLS?") + fmt.Println() fmt.Print("Testing Redis connection... 
") if err := TestRedisConnection(&cfg.Redis); err != nil { @@ -205,6 +207,7 @@ func RunCLI() error { fmt.Println("── Configuration Summary ──") fmt.Printf("Database: %s@%s:%d/%s\n", cfg.Database.User, cfg.Database.Host, cfg.Database.Port, cfg.Database.DBName) fmt.Printf("Redis: %s:%d\n", cfg.Redis.Host, cfg.Redis.Port) + fmt.Printf("Redis TLS: %s\n", map[bool]string{true: "enabled", false: "disabled"}[cfg.Redis.EnableTLS]) fmt.Printf("Admin: %s\n", cfg.Admin.Email) fmt.Printf("Server: :%d\n", cfg.Server.Port) fmt.Println() diff --git a/backend/internal/setup/handler.go b/backend/internal/setup/handler.go index 1c613dfd..1531c97b 100644 --- a/backend/internal/setup/handler.go +++ b/backend/internal/setup/handler.go @@ -176,10 +176,11 @@ func testDatabase(c *gin.Context) { // TestRedisRequest represents Redis test request type TestRedisRequest struct { - Host string `json:"host" binding:"required"` - Port int `json:"port" binding:"required"` - Password string `json:"password"` - DB int `json:"db"` + Host string `json:"host" binding:"required"` + Port int `json:"port" binding:"required"` + Password string `json:"password"` + DB int `json:"db"` + EnableTLS bool `json:"enable_tls"` } // testRedis tests Redis connection @@ -205,10 +206,11 @@ func testRedis(c *gin.Context) { } cfg := &RedisConfig{ - Host: req.Host, - Port: req.Port, - Password: req.Password, - DB: req.DB, + Host: req.Host, + Port: req.Port, + Password: req.Password, + DB: req.DB, + EnableTLS: req.EnableTLS, } if err := TestRedisConnection(cfg); err != nil { diff --git a/backend/internal/setup/setup.go b/backend/internal/setup/setup.go index 65118161..f81f75cf 100644 --- a/backend/internal/setup/setup.go +++ b/backend/internal/setup/setup.go @@ -3,6 +3,7 @@ package setup import ( "context" "crypto/rand" + "crypto/tls" "database/sql" "encoding/hex" "fmt" @@ -79,10 +80,11 @@ type DatabaseConfig struct { } type RedisConfig struct { - Host string `json:"host" yaml:"host"` - Port int `json:"port" yaml:"port"` - Password string `json:"password" yaml:"password"` - DB int `json:"db" yaml:"db"` + Host string `json:"host" yaml:"host"` + Port int `json:"port" yaml:"port"` + Password string `json:"password" yaml:"password"` + DB int `json:"db" yaml:"db"` + EnableTLS bool `json:"enable_tls" yaml:"enable_tls"` } type AdminConfig struct { @@ -199,11 +201,20 @@ func TestDatabaseConnection(cfg *DatabaseConfig) error { // TestRedisConnection tests the Redis connection func TestRedisConnection(cfg *RedisConfig) error { - rdb := redis.NewClient(&redis.Options{ + opts := &redis.Options{ Addr: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port), Password: cfg.Password, DB: cfg.DB, - }) + } + + if cfg.EnableTLS { + opts.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: cfg.Host, + } + } + + rdb := redis.NewClient(opts) defer func() { if err := rdb.Close(); err != nil { log.Printf("failed to close redis client: %v", err) @@ -485,10 +496,11 @@ func AutoSetupFromEnv() error { SSLMode: getEnvOrDefault("DATABASE_SSLMODE", "disable"), }, Redis: RedisConfig{ - Host: getEnvOrDefault("REDIS_HOST", "localhost"), - Port: getEnvIntOrDefault("REDIS_PORT", 6379), - Password: getEnvOrDefault("REDIS_PASSWORD", ""), - DB: getEnvIntOrDefault("REDIS_DB", 0), + Host: getEnvOrDefault("REDIS_HOST", "localhost"), + Port: getEnvIntOrDefault("REDIS_PORT", 6379), + Password: getEnvOrDefault("REDIS_PASSWORD", ""), + DB: getEnvIntOrDefault("REDIS_DB", 0), + EnableTLS: getEnvOrDefault("REDIS_ENABLE_TLS", "false") == "true", }, Admin: AdminConfig{ Email: 
getEnvOrDefault("ADMIN_EMAIL", "admin@sub2api.local"), diff --git a/backend/internal/util/urlvalidator/validator.go b/backend/internal/util/urlvalidator/validator.go index 56a888b9..49df015b 100644 --- a/backend/internal/util/urlvalidator/validator.go +++ b/backend/internal/util/urlvalidator/validator.go @@ -46,7 +46,7 @@ func ValidateURLFormat(raw string, allowInsecureHTTP bool) (string, error) { } } - return trimmed, nil + return strings.TrimRight(trimmed, "/"), nil } func ValidateHTTPSURL(raw string, opts ValidationOptions) (string, error) { diff --git a/backend/internal/util/urlvalidator/validator_test.go b/backend/internal/util/urlvalidator/validator_test.go index b7f9ffed..f9745da3 100644 --- a/backend/internal/util/urlvalidator/validator_test.go +++ b/backend/internal/util/urlvalidator/validator_test.go @@ -21,4 +21,31 @@ func TestValidateURLFormat(t *testing.T) { if _, err := ValidateURLFormat("https://example.com:bad", true); err == nil { t.Fatalf("expected invalid port to fail") } + + // 验证末尾斜杠被移除 + normalized, err := ValidateURLFormat("https://example.com/", false) + if err != nil { + t.Fatalf("expected trailing slash url to pass, got %v", err) + } + if normalized != "https://example.com" { + t.Fatalf("expected trailing slash to be removed, got %s", normalized) + } + + // 验证多个末尾斜杠被移除 + normalized, err = ValidateURLFormat("https://example.com///", false) + if err != nil { + t.Fatalf("expected multiple trailing slashes to pass, got %v", err) + } + if normalized != "https://example.com" { + t.Fatalf("expected all trailing slashes to be removed, got %s", normalized) + } + + // 验证带路径的 URL 末尾斜杠被移除 + normalized, err = ValidateURLFormat("https://example.com/api/v1/", false) + if err != nil { + t.Fatalf("expected trailing slash url with path to pass, got %v", err) + } + if normalized != "https://example.com/api/v1" { + t.Fatalf("expected trailing slash to be removed from path, got %s", normalized) + } } diff --git a/backend/migrations/006_add_users_allowed_groups_compat.sql b/backend/migrations/006_add_users_allowed_groups_compat.sql new file mode 100644 index 00000000..262945d4 --- /dev/null +++ b/backend/migrations/006_add_users_allowed_groups_compat.sql @@ -0,0 +1,15 @@ +-- 兼容旧库:若尚未创建 user_allowed_groups,则确保 users.allowed_groups 存在,避免 007 迁移回填失败。 +DO $$ +BEGIN + IF to_regclass('public.user_allowed_groups') IS NULL THEN + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'users' + ) THEN + ALTER TABLE users + ADD COLUMN IF NOT EXISTS allowed_groups BIGINT[] DEFAULT NULL; + END IF; + END IF; +END $$; diff --git a/backend/migrations/006b_guard_users_allowed_groups.sql b/backend/migrations/006b_guard_users_allowed_groups.sql new file mode 100644 index 00000000..79771bf5 --- /dev/null +++ b/backend/migrations/006b_guard_users_allowed_groups.sql @@ -0,0 +1,27 @@ +-- 兼容缺失 users.allowed_groups 的老库,确保 007 回填可执行。 +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'public' + AND table_name = 'users' + ) THEN + IF NOT EXISTS ( + SELECT 1 + FROM information_schema.columns + WHERE table_schema = 'public' + AND table_name = 'users' + AND column_name = 'allowed_groups' + ) THEN + IF NOT EXISTS ( + SELECT 1 + FROM schema_migrations + WHERE filename = '014_drop_legacy_allowed_groups.sql' + ) THEN + ALTER TABLE users + ADD COLUMN IF NOT EXISTS allowed_groups BIGINT[] DEFAULT NULL; + END IF; + END IF; + END IF; +END $$; diff --git a/backend/migrations/042_add_usage_cleanup_tasks.sql 
b/backend/migrations/042_add_usage_cleanup_tasks.sql new file mode 100644 index 00000000..ce4be91f --- /dev/null +++ b/backend/migrations/042_add_usage_cleanup_tasks.sql @@ -0,0 +1,21 @@ +-- 042_add_usage_cleanup_tasks.sql +-- 使用记录清理任务表 + +CREATE TABLE IF NOT EXISTS usage_cleanup_tasks ( + id BIGSERIAL PRIMARY KEY, + status VARCHAR(20) NOT NULL, + filters JSONB NOT NULL, + created_by BIGINT NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + deleted_rows BIGINT NOT NULL DEFAULT 0, + error_message TEXT, + started_at TIMESTAMPTZ, + finished_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_status_created_at + ON usage_cleanup_tasks(status, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_created_at + ON usage_cleanup_tasks(created_at DESC); diff --git a/backend/migrations/042_add_ops_system_metrics_switch_count.sql b/backend/migrations/042b_add_ops_system_metrics_switch_count.sql similarity index 100% rename from backend/migrations/042_add_ops_system_metrics_switch_count.sql rename to backend/migrations/042b_add_ops_system_metrics_switch_count.sql diff --git a/backend/migrations/043_add_usage_cleanup_cancel_audit.sql b/backend/migrations/043_add_usage_cleanup_cancel_audit.sql new file mode 100644 index 00000000..42ca6696 --- /dev/null +++ b/backend/migrations/043_add_usage_cleanup_cancel_audit.sql @@ -0,0 +1,10 @@ +-- 043_add_usage_cleanup_cancel_audit.sql +-- usage_cleanup_tasks 取消任务审计字段 + +ALTER TABLE usage_cleanup_tasks + ADD COLUMN IF NOT EXISTS canceled_by BIGINT REFERENCES users(id) ON DELETE SET NULL, + ADD COLUMN IF NOT EXISTS canceled_at TIMESTAMPTZ; + +CREATE INDEX IF NOT EXISTS idx_usage_cleanup_tasks_canceled_at + ON usage_cleanup_tasks(canceled_at DESC); + diff --git a/backend/migrations/043_add_group_invalid_request_fallback.sql b/backend/migrations/043b_add_group_invalid_request_fallback.sql similarity index 92% rename from backend/migrations/043_add_group_invalid_request_fallback.sql rename to backend/migrations/043b_add_group_invalid_request_fallback.sql index 1c792704..0c6206aa 100644 --- a/backend/migrations/043_add_group_invalid_request_fallback.sql +++ b/backend/migrations/043b_add_group_invalid_request_fallback.sql @@ -1,4 +1,4 @@ --- 043_add_group_invalid_request_fallback.sql +-- 043b_add_group_invalid_request_fallback.sql -- 添加无效请求兜底分组配置 -- 添加 fallback_group_id_on_invalid_request 字段:无效请求兜底使用的分组 diff --git a/backend/migrations/044_add_user_totp.sql b/backend/migrations/044_add_user_totp.sql new file mode 100644 index 00000000..6e157a68 --- /dev/null +++ b/backend/migrations/044_add_user_totp.sql @@ -0,0 +1,12 @@ +-- 为 users 表添加 TOTP 双因素认证字段 +ALTER TABLE users + ADD COLUMN IF NOT EXISTS totp_secret_encrypted TEXT DEFAULT NULL, + ADD COLUMN IF NOT EXISTS totp_enabled BOOLEAN NOT NULL DEFAULT FALSE, + ADD COLUMN IF NOT EXISTS totp_enabled_at TIMESTAMPTZ DEFAULT NULL; + +COMMENT ON COLUMN users.totp_secret_encrypted IS 'AES-256-GCM 加密的 TOTP 密钥'; +COMMENT ON COLUMN users.totp_enabled IS '是否启用 TOTP 双因素认证'; +COMMENT ON COLUMN users.totp_enabled_at IS 'TOTP 启用时间'; + +-- 创建索引以支持快速查询启用 2FA 的用户 +CREATE INDEX IF NOT EXISTS idx_users_totp_enabled ON users(totp_enabled) WHERE deleted_at IS NULL AND totp_enabled = true; diff --git a/backend/migrations/044_add_group_mcp_xml_inject.sql b/backend/migrations/044b_add_group_mcp_xml_inject.sql similarity index 100% rename from backend/migrations/044_add_group_mcp_xml_inject.sql rename to 
backend/migrations/044b_add_group_mcp_xml_inject.sql diff --git a/backend/migrations/045_add_announcements.sql b/backend/migrations/045_add_announcements.sql new file mode 100644 index 00000000..cfb9b4b5 --- /dev/null +++ b/backend/migrations/045_add_announcements.sql @@ -0,0 +1,44 @@ +-- 创建公告表 +CREATE TABLE IF NOT EXISTS announcements ( + id BIGSERIAL PRIMARY KEY, + title VARCHAR(200) NOT NULL, + content TEXT NOT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'draft', + targeting JSONB NOT NULL DEFAULT '{}'::jsonb, + starts_at TIMESTAMPTZ DEFAULT NULL, + ends_at TIMESTAMPTZ DEFAULT NULL, + created_by BIGINT DEFAULT NULL REFERENCES users(id) ON DELETE SET NULL, + updated_by BIGINT DEFAULT NULL REFERENCES users(id) ON DELETE SET NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- 公告已读表 +CREATE TABLE IF NOT EXISTS announcement_reads ( + id BIGSERIAL PRIMARY KEY, + announcement_id BIGINT NOT NULL REFERENCES announcements(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + read_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(announcement_id, user_id) +); + +-- 索引 +CREATE INDEX IF NOT EXISTS idx_announcements_status ON announcements(status); +CREATE INDEX IF NOT EXISTS idx_announcements_starts_at ON announcements(starts_at); +CREATE INDEX IF NOT EXISTS idx_announcements_ends_at ON announcements(ends_at); +CREATE INDEX IF NOT EXISTS idx_announcements_created_at ON announcements(created_at); + +CREATE INDEX IF NOT EXISTS idx_announcement_reads_announcement_id ON announcement_reads(announcement_id); +CREATE INDEX IF NOT EXISTS idx_announcement_reads_user_id ON announcement_reads(user_id); +CREATE INDEX IF NOT EXISTS idx_announcement_reads_read_at ON announcement_reads(read_at); + +COMMENT ON TABLE announcements IS '系统公告'; +COMMENT ON COLUMN announcements.status IS '状态: draft, active, archived'; +COMMENT ON COLUMN announcements.targeting IS '展示条件(JSON 规则)'; +COMMENT ON COLUMN announcements.starts_at IS '开始展示时间(为空表示立即生效)'; +COMMENT ON COLUMN announcements.ends_at IS '结束展示时间(为空表示永久生效)'; + +COMMENT ON TABLE announcement_reads IS '公告已读记录'; +COMMENT ON COLUMN announcement_reads.read_at IS '用户首次已读时间'; + diff --git a/config.yaml b/config.yaml index 424ce9eb..19f77221 100644 --- a/config.yaml +++ b/config.yaml @@ -251,6 +251,27 @@ dashboard_aggregation: # 日聚合保留天数 daily_days: 730 +# ============================================================================= +# Usage Cleanup Task Configuration +# 使用记录清理任务配置(重启生效) +# ============================================================================= +usage_cleanup: + # Enable cleanup task worker + # 启用清理任务执行器 + enabled: true + # Max date range (days) per task + # 单次任务最大时间跨度(天) + max_range_days: 31 + # Batch delete size + # 单批删除数量 + batch_size: 5000 + # Worker interval (seconds) + # 执行器轮询间隔(秒) + worker_interval_seconds: 10 + # Task execution timeout (seconds) + # 单次任务最大执行时长(秒) + task_timeout_seconds: 1800 + # ============================================================================= # Concurrency Wait Configuration # 并发等待配置 @@ -301,6 +322,9 @@ redis: # Database number (0-15) # 数据库编号(0-15) db: 0 + # Enable TLS/SSL connection + # 是否启用 TLS/SSL 连接 + enable_tls: false # ============================================================================= # Ops Monitoring (Optional) diff --git a/deploy/.env.example b/deploy/.env.example index f21a3c62..25096c3d 100644 --- a/deploy/.env.example +++ b/deploy/.env.example @@ -40,6 
+40,7 @@ POSTGRES_DB=sub2api # Leave empty for no password (default for local development) REDIS_PASSWORD= REDIS_DB=0 +REDIS_ENABLE_TLS=false # ----------------------------------------------------------------------------- # Admin Account @@ -61,6 +62,18 @@ ADMIN_PASSWORD= JWT_SECRET= JWT_EXPIRE_HOUR=24 +# ----------------------------------------------------------------------------- +# TOTP (2FA) Configuration +# TOTP(双因素认证)配置 +# ----------------------------------------------------------------------------- +# IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, a +# random key will be generated on each startup, causing all existing TOTP +# configurations to become invalid (users won't be able to login with 2FA). +# Generate a secure key: openssl rand -hex 32 +# 重要:设置固定的 TOTP 加密密钥。如果留空,每次启动将生成随机密钥, +# 导致现有的 TOTP 配置失效(用户无法使用双因素认证登录)。 +TOTP_ENCRYPTION_KEY= + # ----------------------------------------------------------------------------- # Configuration File (Optional) # ----------------------------------------------------------------------------- diff --git a/deploy/.gitignore b/deploy/.gitignore new file mode 100644 index 00000000..29a15135 --- /dev/null +++ b/deploy/.gitignore @@ -0,0 +1,19 @@ +# ============================================================================= +# Sub2API Deploy Directory - Git Ignore +# ============================================================================= + +# Data directories (generated at runtime when using docker-compose.local.yml) +data/ +postgres_data/ +redis_data/ + +# Environment configuration (contains sensitive information) +.env + +# Backup files +*.backup +*.bak + +# Temporary files +*.tmp +*.log diff --git a/deploy/README.md b/deploy/README.md index f697247d..091d8ad7 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -13,7 +13,9 @@ This directory contains files for deploying Sub2API on Linux servers. | File | Description | |------|-------------| -| `docker-compose.yml` | Docker Compose configuration | +| `docker-compose.yml` | Docker Compose configuration (named volumes) | +| `docker-compose.local.yml` | Docker Compose configuration (local directories, easy migration) | +| `docker-deploy.sh` | **One-click Docker deployment script (recommended)** | | `.env.example` | Docker environment variables template | | `DOCKER.md` | Docker Hub documentation | | `install.sh` | One-click binary installation script | @@ -24,7 +26,45 @@ This directory contains files for deploying Sub2API on Linux servers. ## Docker Deployment (Recommended) -### Quick Start +### Method 1: One-Click Deployment (Recommended) + +Use the automated preparation script for the easiest setup: + +```bash +# Download and run the preparation script +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash + +# Or download first, then run +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh -o docker-deploy.sh +chmod +x docker-deploy.sh +./docker-deploy.sh +``` + +**What the script does:** +- Downloads `docker-compose.local.yml` and `.env.example` +- Automatically generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +- Creates `.env` file with generated secrets +- Creates necessary data directories (data/, postgres_data/, redis_data/) +- **Displays generated credentials** (POSTGRES_PASSWORD, JWT_SECRET, etc.) 
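+
+For reference, the secret-generation step is roughly equivalent to the manual commands below (an illustrative sketch, not the script's exact code; it assumes `openssl` and GNU `sed` are available, and the key-length choices are examples):
+
+```bash
+# Sketch of what docker-deploy.sh automates (illustrative only)
+cp .env.example .env
+sed -i "s|^POSTGRES_PASSWORD=.*|POSTGRES_PASSWORD=$(openssl rand -hex 16)|" .env
+sed -i "s|^JWT_SECRET=.*|JWT_SECRET=$(openssl rand -hex 32)|" .env
+sed -i "s|^TOTP_ENCRYPTION_KEY=.*|TOTP_ENCRYPTION_KEY=$(openssl rand -hex 32)|" .env
+mkdir -p data postgres_data redis_data
+```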
+ +**After running the script:** +```bash +# Start services +docker-compose -f docker-compose.local.yml up -d + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api + +# If admin password was auto-generated, find it in logs: +docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password" + +# Access Web UI +# http://localhost:8080 +``` + +### Method 2: Manual Deployment + +If you prefer manual control: ```bash # Clone repository @@ -33,18 +73,36 @@ cd sub2api/deploy # Configure environment cp .env.example .env -nano .env # Set POSTGRES_PASSWORD (required) +nano .env # Set POSTGRES_PASSWORD and other required variables -# Start all services -docker-compose up -d +# Generate secure secrets (recommended) +JWT_SECRET=$(openssl rand -hex 32) +TOTP_ENCRYPTION_KEY=$(openssl rand -hex 32) +echo "JWT_SECRET=${JWT_SECRET}" >> .env +echo "TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}" >> .env + +# Create data directories +mkdir -p data postgres_data redis_data + +# Start all services using local directory version +docker-compose -f docker-compose.local.yml up -d # View logs (check for auto-generated admin password) -docker-compose logs -f sub2api +docker-compose -f docker-compose.local.yml logs -f sub2api # Access Web UI # http://localhost:8080 ``` +### Deployment Version Comparison + +| Version | Data Storage | Migration | Best For | +|---------|-------------|-----------|----------| +| **docker-compose.local.yml** | Local directories (./data, ./postgres_data, ./redis_data) | ✅ Easy (tar entire directory) | Production, need frequent backups/migration | +| **docker-compose.yml** | Named volumes (/var/lib/docker/volumes/) | ⚠️ Requires docker commands | Simple setup, don't need migration | + +**Recommendation:** Use `docker-compose.local.yml` (deployed by `docker-deploy.sh`) for easier data management and migration. + ### How Auto-Setup Works When using Docker Compose with `AUTO_SETUP=true`: @@ -89,6 +147,32 @@ SELECT ### Commands +For **local directory version** (docker-compose.local.yml): + +```bash +# Start services +docker-compose -f docker-compose.local.yml up -d + +# Stop services +docker-compose -f docker-compose.local.yml down + +# View logs +docker-compose -f docker-compose.local.yml logs -f sub2api + +# Restart Sub2API only +docker-compose -f docker-compose.local.yml restart sub2api + +# Update to latest version +docker-compose -f docker-compose.local.yml pull +docker-compose -f docker-compose.local.yml up -d + +# Remove all data (caution!) +docker-compose -f docker-compose.local.yml down +rm -rf data/ postgres_data/ redis_data/ +``` + +For **named volumes version** (docker-compose.yml): + ```bash # Start services docker-compose up -d @@ -115,10 +199,11 @@ docker-compose down -v | Variable | Required | Default | Description | |----------|----------|---------|-------------| | `POSTGRES_PASSWORD` | **Yes** | - | PostgreSQL password | +| `JWT_SECRET` | **Recommended** | *(auto-generated)* | JWT secret (fixed for persistent sessions) | +| `TOTP_ENCRYPTION_KEY` | **Recommended** | *(auto-generated)* | TOTP encryption key (fixed for persistent 2FA) | | `SERVER_PORT` | No | `8080` | Server port | | `ADMIN_EMAIL` | No | `admin@sub2api.local` | Admin email | | `ADMIN_PASSWORD` | No | *(auto-generated)* | Admin password | -| `JWT_SECRET` | No | *(auto-generated)* | JWT secret | | `TZ` | No | `Asia/Shanghai` | Timezone | | `GEMINI_OAUTH_CLIENT_ID` | No | *(builtin)* | Google OAuth client ID (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. 
| | `GEMINI_OAUTH_CLIENT_SECRET` | No | *(builtin)* | Google OAuth client secret (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. | @@ -127,6 +212,30 @@ docker-compose down -v See `.env.example` for all available options. +> **Note:** The `docker-deploy.sh` script automatically generates `JWT_SECRET`, `TOTP_ENCRYPTION_KEY`, and `POSTGRES_PASSWORD` for you. + +### Easy Migration (Local Directory Version) + +When using `docker-compose.local.yml`, all data is stored in local directories, making migration simple: + +```bash +# On source server: Stop services and create archive +cd /path/to/deployment +docker-compose -f docker-compose.local.yml down +cd .. +tar czf sub2api-complete.tar.gz deployment/ + +# Transfer to new server +scp sub2api-complete.tar.gz user@new-server:/path/to/destination/ + +# On new server: Extract and start +tar xzf sub2api-complete.tar.gz +cd deployment/ +docker-compose -f docker-compose.local.yml up -d +``` + +Your entire deployment (configuration + data) is migrated! + --- ## Gemini OAuth Configuration @@ -359,6 +468,30 @@ The main config file is at `/etc/sub2api/config.yaml` (created by Setup Wizard). ### Docker +For **local directory version**: + +```bash +# Check container status +docker-compose -f docker-compose.local.yml ps + +# View detailed logs +docker-compose -f docker-compose.local.yml logs --tail=100 sub2api + +# Check database connection +docker-compose -f docker-compose.local.yml exec postgres pg_isready + +# Check Redis connection +docker-compose -f docker-compose.local.yml exec redis redis-cli ping + +# Restart all services +docker-compose -f docker-compose.local.yml restart + +# Check data directories +ls -la data/ postgres_data/ redis_data/ +``` + +For **named volumes version**: + ```bash # Check container status docker-compose ps @@ -401,3 +534,60 @@ sudo systemctl status redis 2. **Database connection failed**: Check PostgreSQL is running and credentials are correct 3. **Redis connection failed**: Check Redis is running and password is correct 4. **Permission denied**: Ensure proper file ownership for binary install + +--- + +## TLS Fingerprint Configuration + +Sub2API supports TLS fingerprint simulation to make requests appear as if they come from the official Claude CLI (Node.js client). + +> **💡 Tip:** Visit **[tls.sub2api.org](https://tls.sub2api.org/)** to get TLS fingerprint information for different devices and browsers. + +### Default Behavior + +- Built-in `claude_cli_v2` profile simulates Node.js 20.x + OpenSSL 3.x +- JA3 Hash: `1a28e69016765d92e3b381168d68922c` +- JA4: `t13d5911h1_a33745022dd6_1f22a2ca17c4` +- Profile selection: `accountID % profileCount` + +### Configuration + +```yaml +gateway: + tls_fingerprint: + enabled: true # Global switch + profiles: + # Simple profile (uses default cipher suites) + profile_1: + name: "Profile 1" + + # Profile with custom cipher suites (use compact array format) + profile_2: + name: "Profile 2" + cipher_suites: [4866, 4867, 4865, 49199, 49195, 49200, 49196] + curves: [29, 23, 24] + point_formats: [0] + + # Another custom profile + profile_3: + name: "Profile 3" + cipher_suites: [4865, 4866, 4867, 49199, 49200] + curves: [29, 23, 24, 25] +``` + +### Profile Fields + +| Field | Type | Description | +|-------|------|-------------| +| `name` | string | Display name (required) | +| `cipher_suites` | []uint16 | Cipher suites in decimal. Empty = default | +| `curves` | []uint16 | Elliptic curves in decimal. Empty = default | +| `point_formats` | []uint8 | EC point formats. 
Empty = default | + +### Common Values Reference + +**Cipher Suites (TLS 1.3):** `4865` (AES_128_GCM), `4866` (AES_256_GCM), `4867` (CHACHA20) + +**Cipher Suites (TLS 1.2):** `49195`, `49196`, `49199`, `49200` (ECDHE variants) + +**Curves:** `29` (X25519), `23` (P-256), `24` (P-384), `25` (P-521) diff --git a/build_image.sh b/deploy/build_image.sh similarity index 100% rename from build_image.sh rename to deploy/build_image.sh diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 9e85d1ff..6f5e9744 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -210,6 +210,19 @@ gateway: outbox_backlog_rebuild_rows: 10000 # 全量重建周期(秒),0 表示禁用 full_rebuild_interval_seconds: 300 + # TLS fingerprint simulation / TLS 指纹伪装 + # Default profile "claude_cli_v2" simulates Node.js 20.x + # 默认模板 "claude_cli_v2" 模拟 Node.js 20.x 指纹 + tls_fingerprint: + enabled: true + # profiles: + # profile_1: + # name: "Custom Profile 1" + # profile_2: + # name: "Custom Profile 2" + # cipher_suites: [4866, 4867, 4865, 49199, 49195, 49200, 49196] + # curves: [29, 23, 24] + # point_formats: [0] # ============================================================================= # API Key Auth Cache Configuration @@ -292,6 +305,27 @@ dashboard_aggregation: # 日聚合保留天数 daily_days: 730 +# ============================================================================= +# Usage Cleanup Task Configuration +# 使用记录清理任务配置(重启生效) +# ============================================================================= +usage_cleanup: + # Enable cleanup task worker + # 启用清理任务执行器 + enabled: true + # Max date range (days) per task + # 单次任务最大时间跨度(天) + max_range_days: 31 + # Batch delete size + # 单批删除数量 + batch_size: 5000 + # Worker interval (seconds) + # 执行器轮询间隔(秒) + worker_interval_seconds: 10 + # Task execution timeout (seconds) + # 单次任务最大执行时长(秒) + task_timeout_seconds: 1800 + # ============================================================================= # Concurrency Wait Configuration # 并发等待配置 @@ -342,6 +376,9 @@ redis: # Database number (0-15) # 数据库编号(0-15) db: 0 + # Enable TLS/SSL connection + # 是否启用 TLS/SSL 连接 + enable_tls: false # ============================================================================= # Ops Monitoring (Optional) @@ -369,6 +406,21 @@ jwt: # 令牌过期时间(小时,最大 24) expire_hour: 24 +# ============================================================================= +# TOTP (2FA) Configuration +# TOTP 双因素认证配置 +# ============================================================================= +totp: + # IMPORTANT: Set a fixed encryption key for TOTP secrets. + # 重要:设置固定的 TOTP 加密密钥。 + # If left empty, a random key will be generated on each startup, causing all + # existing TOTP configurations to become invalid (users won't be able to + # login with 2FA). 
+ # 如果留空,每次启动将生成随机密钥,导致现有的 TOTP 配置失效(用户无法使用 + # 双因素认证登录)。 + # Generate with / 生成命令: openssl rand -hex 32 + encryption_key: "" + # ============================================================================= # LinuxDo Connect OAuth Login (SSO) # LinuxDo Connect OAuth 登录(用于 Sub2API 用户登录) diff --git a/deploy/docker-compose.local.yml b/deploy/docker-compose.local.yml new file mode 100644 index 00000000..05ce129a --- /dev/null +++ b/deploy/docker-compose.local.yml @@ -0,0 +1,222 @@ +# ============================================================================= +# Sub2API Docker Compose - Local Directory Version +# ============================================================================= +# This configuration uses local directories for data storage instead of named +# volumes, making it easy to migrate the entire deployment by simply copying +# the deploy directory. +# +# Quick Start: +# 1. Copy .env.example to .env and configure +# 2. mkdir -p data postgres_data redis_data +# 3. docker-compose -f docker-compose.local.yml up -d +# 4. Check logs: docker-compose -f docker-compose.local.yml logs -f sub2api +# 5. Access: http://localhost:8080 +# +# Migration to New Server: +# 1. docker-compose -f docker-compose.local.yml down +# 2. tar czf sub2api-deploy.tar.gz deploy/ +# 3. Transfer to new server and extract +# 4. docker-compose -f docker-compose.local.yml up -d +# ============================================================================= + +services: + # =========================================================================== + # Sub2API Application + # =========================================================================== + sub2api: + image: weishaw/sub2api:latest + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + # Local directory mapping for easy migration + - ./data:/app/data + # Optional: Mount custom config.yaml (uncomment and create the file first) + # Copy config.example.yaml to config.yaml, modify it, then uncomment: + # - ./config.yaml:/app/data/config.yaml:ro + environment: + # ======================================================================= + # Auto Setup (REQUIRED for Docker deployment) + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) + # ======================================================================= + - DATABASE_HOST=postgres + - DATABASE_PORT=5432 + - DATABASE_USER=${POSTGRES_USER:-sub2api} + - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} + - DATABASE_SSLMODE=disable + + # ======================================================================= + # Redis Configuration + # ======================================================================= + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false} + + # ======================================================================= + # Admin Account (auto-created 
on first run) + # ======================================================================= + - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + # IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being + # invalidated after container restarts. If left empty, a random secret + # will be generated on each startup. + # Generate a secure secret: openssl rand -hex 32 + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # TOTP (2FA) Configuration + # ======================================================================= + # IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, + # a random key will be generated on each startup, causing all existing + # TOTP configurations to become invalid (users won't be able to login + # with 2FA). + # Generate a secure key: openssl rand -hex 32 + - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-} + + # ======================================================================= + # Timezone Configuration + # This affects ALL time operations in the application: + # - Database timestamps + # - Usage statistics "today" boundary + # - Subscription expiry times + # - Log timestamps + # Common values: Asia/Shanghai, America/New_York, Europe/London, UTC + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (for Gemini accounts) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + + # ======================================================================= + # Security Configuration (URL Allowlist) + # ======================================================================= + # Enable URL allowlist validation (false to skip allowlist checks) + - SECURITY_URL_ALLOWLIST_ENABLED=${SECURITY_URL_ALLOWLIST_ENABLED:-false} + # Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) + - SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=${SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP:-false} + # Allow private IP addresses for upstream/pricing/CRS (for internal deployments) + - SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-false} + # Upstream hosts whitelist (comma-separated, only used when enabled=true) + - SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS=${SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS:-} + + # ======================================================================= + # Update Configuration (在线更新配置) + # ======================================================================= + # Proxy for accessing GitHub (online updates + pricing data) + # Examples: http://host:port, socks5://host:port + - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-} + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + networks: + - sub2api-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + + # 
===========================================================================
+  # PostgreSQL Database
+  # ===========================================================================
+  postgres:
+    image: postgres:18-alpine
+    container_name: sub2api-postgres
+    restart: unless-stopped
+    ulimits:
+      nofile:
+        soft: 100000
+        hard: 100000
+    volumes:
+      # Local directory mapping for easy migration
+      - ./postgres_data:/var/lib/postgresql/data
+    environment:
+      - POSTGRES_USER=${POSTGRES_USER:-sub2api}
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
+      - POSTGRES_DB=${POSTGRES_DB:-sub2api}
+      - PGDATA=/var/lib/postgresql/data
+      - TZ=${TZ:-Asia/Shanghai}
+    networks:
+      - sub2api-network
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+      start_period: 10s
+    # Note: no port is published to the host; the app connects over the internal network
+    # For debugging, temporarily add: ports: ["127.0.0.1:5433:5432"]
+
+  # ===========================================================================
+  # Redis Cache
+  # ===========================================================================
+  redis:
+    image: redis:8-alpine
+    container_name: sub2api-redis
+    restart: unless-stopped
+    ulimits:
+      nofile:
+        soft: 100000
+        hard: 100000
+    volumes:
+      # Local directory mapping for easy migration
+      - ./redis_data:/data
+    command: >
+      sh -c '
+      redis-server
+      --save 60 1
+      --appendonly yes
+      --appendfsync everysec
+      ${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
+    environment:
+      - TZ=${TZ:-Asia/Shanghai}
+      # REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
+      - REDISCLI_AUTH=${REDIS_PASSWORD:-}
+    networks:
+      - sub2api-network
+    healthcheck:
+      test: ["CMD", "redis-cli", "ping"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+      start_period: 5s
+
+# =============================================================================
+# Networks
+# =============================================================================
+networks:
+  sub2api-network:
+    driver: bridge
diff --git a/deploy/docker-compose.standalone.yml b/deploy/docker-compose.standalone.yml
index 1bf247c7..97903bc5 100644
--- a/deploy/docker-compose.standalone.yml
+++ b/deploy/docker-compose.standalone.yml
@@ -56,6 +56,7 @@ services:
       - REDIS_PORT=${REDIS_PORT:-6379}
       - REDIS_PASSWORD=${REDIS_PASSWORD:-}
       - REDIS_DB=${REDIS_DB:-0}
+      - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false}
 
       # =======================================================================
       # Admin Account (auto-created on first run)
diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml
index 484df3a8..033731ac 100644
--- a/deploy/docker-compose.yml
+++ b/deploy/docker-compose.yml
@@ -62,6 +62,7 @@ services:
       - REDIS_PORT=6379
       - REDIS_PASSWORD=${REDIS_PASSWORD:-}
       - REDIS_DB=${REDIS_DB:-0}
+      - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false}
 
       # =======================================================================
       # Admin Account (auto-created on first run)
@@ -79,6 +80,16 @@
       - JWT_SECRET=${JWT_SECRET:-}
       - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24}
 
+      # =======================================================================
+      # TOTP (2FA) Configuration
+      # =======================================================================
+      # IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty,
+      # a random key will be generated on each startup, causing all existing
+      # TOTP configurations to become invalid (users won't be able to login
+      # with 2FA).
+ # Generate a secure key: openssl rand -hex 32 + - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-} + # ======================================================================= # Timezone Configuration # This affects ALL time operations in the application: diff --git a/deploy/docker-deploy.sh b/deploy/docker-deploy.sh new file mode 100644 index 00000000..1e4ce81f --- /dev/null +++ b/deploy/docker-deploy.sh @@ -0,0 +1,171 @@ +#!/bin/bash +# ============================================================================= +# Sub2API Docker Deployment Preparation Script +# ============================================================================= +# This script prepares deployment files for Sub2API: +# - Downloads docker-compose.local.yml and .env.example +# - Generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD) +# - Creates necessary data directories +# +# After running this script, you can start services with: +# docker-compose -f docker-compose.local.yml up -d +# ============================================================================= + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# GitHub raw content base URL +GITHUB_RAW_URL="https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy" + +# Print colored message +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Generate random secret +generate_secret() { + openssl rand -hex 32 +} + +# Check if command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Main installation function +main() { + echo "" + echo "==========================================" + echo " Sub2API Deployment Preparation" + echo "==========================================" + echo "" + + # Check if openssl is available + if ! command_exists openssl; then + print_error "openssl is not installed. Please install openssl first." + exit 1 + fi + + # Check if deployment already exists + if [ -f "docker-compose.local.yml" ] && [ -f ".env" ]; then + print_warning "Deployment files already exist in current directory." + read -p "Overwrite existing files? (y/N): " -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + print_info "Cancelled." + exit 0 + fi + fi + + # Download docker-compose.local.yml + print_info "Downloading docker-compose.local.yml..." + if command_exists curl; then + curl -sSL "${GITHUB_RAW_URL}/docker-compose.local.yml" -o docker-compose.local.yml + elif command_exists wget; then + wget -q "${GITHUB_RAW_URL}/docker-compose.local.yml" -O docker-compose.local.yml + else + print_error "Neither curl nor wget is installed. Please install one of them." + exit 1 + fi + print_success "Downloaded docker-compose.local.yml" + + # Download .env.example + print_info "Downloading .env.example..." + if command_exists curl; then + curl -sSL "${GITHUB_RAW_URL}/.env.example" -o .env.example + else + wget -q "${GITHUB_RAW_URL}/.env.example" -O .env.example + fi + print_success "Downloaded .env.example" + + # Generate .env file with auto-generated secrets + print_info "Generating secure secrets..." 
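+    # Each secret below comes from generate_secret above, i.e.
+    # `openssl rand -hex 32`: 32 random bytes printed as 64 hex characters.
+    # The three values are generated independently of one another.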
+ echo "" + + # Generate secrets + JWT_SECRET=$(generate_secret) + TOTP_ENCRYPTION_KEY=$(generate_secret) + POSTGRES_PASSWORD=$(generate_secret) + + # Create .env from .env.example + cp .env.example .env + + # Update .env with generated secrets (cross-platform compatible) + if sed --version >/dev/null 2>&1; then + # GNU sed (Linux) + sed -i "s/^JWT_SECRET=.*/JWT_SECRET=${JWT_SECRET}/" .env + sed -i "s/^TOTP_ENCRYPTION_KEY=.*/TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}/" .env + sed -i "s/^POSTGRES_PASSWORD=.*/POSTGRES_PASSWORD=${POSTGRES_PASSWORD}/" .env + else + # BSD sed (macOS) + sed -i '' "s/^JWT_SECRET=.*/JWT_SECRET=${JWT_SECRET}/" .env + sed -i '' "s/^TOTP_ENCRYPTION_KEY=.*/TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}/" .env + sed -i '' "s/^POSTGRES_PASSWORD=.*/POSTGRES_PASSWORD=${POSTGRES_PASSWORD}/" .env + fi + + # Create data directories + print_info "Creating data directories..." + mkdir -p data postgres_data redis_data + print_success "Created data directories" + + # Set secure permissions for .env file (readable/writable only by owner) + chmod 600 .env + echo "" + + # Display completion message + echo "==========================================" + echo " Preparation Complete!" + echo "==========================================" + echo "" + echo "Generated secure credentials:" + echo " POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}" + echo " JWT_SECRET: ${JWT_SECRET}" + echo " TOTP_ENCRYPTION_KEY: ${TOTP_ENCRYPTION_KEY}" + echo "" + print_warning "These credentials have been saved to .env file." + print_warning "Please keep them secure and do not share publicly!" + echo "" + echo "Directory structure:" + echo " docker-compose.local.yml - Docker Compose configuration" + echo " .env - Environment variables (generated secrets)" + echo " .env.example - Example template (for reference)" + echo " data/ - Application data (will be created on first run)" + echo " postgres_data/ - PostgreSQL data" + echo " redis_data/ - Redis data" + echo "" + echo "Next steps:" + echo " 1. (Optional) Edit .env to customize configuration" + echo " 2. Start services:" + echo " docker-compose -f docker-compose.local.yml up -d" + echo "" + echo " 3. View logs:" + echo " docker-compose -f docker-compose.local.yml logs -f sub2api" + echo "" + echo " 4. Access Web UI:" + echo " http://localhost:8080" + echo "" + print_info "If admin password is not set in .env, it will be auto-generated." + print_info "Check logs for the generated admin password on first startup." 
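+    # The exact command is shown in deploy/README.md:
+    #   docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password"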
+ echo "" +} + +# Run main function +main "$@" diff --git a/frontend/package-lock.json b/frontend/package-lock.json deleted file mode 100644 index e6c6144e..00000000 --- a/frontend/package-lock.json +++ /dev/null @@ -1,6954 +0,0 @@ -{ - "name": "sub2api-frontend", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "sub2api-frontend", - "version": "1.0.0", - "dependencies": { - "@lobehub/icons": "^4.0.2", - "@vueuse/core": "^10.7.0", - "axios": "^1.6.2", - "chart.js": "^4.4.1", - "driver.js": "^1.4.0", - "file-saver": "^2.0.5", - "pinia": "^2.1.7", - "vue": "^3.4.0", - "vue-chartjs": "^5.3.0", - "vue-i18n": "^9.14.5", - "vue-router": "^4.2.5", - "xlsx": "^0.18.5" - }, - "devDependencies": { - "@types/file-saver": "^2.0.7", - "@types/mdx": "^2.0.13", - "@types/node": "^20.10.5", - "@typescript-eslint/eslint-plugin": "^7.18.0", - "@typescript-eslint/parser": "^7.18.0", - "@vitejs/plugin-vue": "^5.2.3", - "@vitest/coverage-v8": "^2.1.9", - "@vue/test-utils": "^2.4.6", - "autoprefixer": "^10.4.16", - "eslint": "^8.57.0", - "eslint-plugin-vue": "^9.25.0", - "jsdom": "^24.1.3", - "postcss": "^8.4.32", - "tailwindcss": "^3.4.0", - "typescript": "~5.6.0", - "vite": "^5.0.10", - "vite-plugin-checker": "^0.9.1", - "vitest": "^2.1.9", - "vue-tsc": "^2.2.0" - } - }, - "node_modules/@alloc/quick-lru": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", - "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@ampproject/remapping": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@ant-design/cssinjs": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-2.0.2.tgz", - "integrity": "sha512-7KDVIigtqlamOLtJ0hbjECX/sDGDaJXsM/KHala8I/1E4lpl9RAO585kbVvh/k1rIrFAV6JeGkXmdWyYj9XvuA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.11.1", - "@emotion/hash": "^0.8.0", - "@emotion/unitless": "^0.7.5", - "@rc-component/util": "^1.4.0", - "clsx": "^2.1.1", - "csstype": "^3.1.3", - "stylis": "^4.3.4" - }, - "peerDependencies": { - "react": ">=16.0.0", - "react-dom": ">=16.0.0" - } - }, - "node_modules/@asamuzakjp/css-color": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", - "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@csstools/css-calc": "^2.1.3", - "@csstools/css-color-parser": "^3.0.9", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "lru-cache": "^10.4.3" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", - "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", - "license": 
"MIT", - "dependencies": { - "@babel/helper-validator-identifier": "^7.27.1", - "js-tokens": "^4.0.0", - "picocolors": "^1.1.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", - "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@babel/types": "^7.28.5", - "@jridgewell/gen-mapping": "^0.3.12", - "@jridgewell/trace-mapping": "^0.3.28", - "jsesc": "^3.0.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-globals": { - "version": "7.28.0", - "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", - "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", - "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.27.1", - "@babel/types": "^7.27.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-string-parser": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", - "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", - "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", - "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.28.5" - }, - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/runtime": { - "version": "7.28.4", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", - "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.27.2", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", - "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "@babel/parser": "^7.27.2", - "@babel/types": "^7.27.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", - "integrity": 
"sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "@babel/generator": "^7.28.5", - "@babel/helper-globals": "^7.28.0", - "@babel/parser": "^7.28.5", - "@babel/template": "^7.27.2", - "@babel/types": "^7.28.5", - "debug": "^4.3.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/types": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", - "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", - "license": "MIT", - "dependencies": { - "@babel/helper-string-parser": "^7.27.1", - "@babel/helper-validator-identifier": "^7.28.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@bcoe/v8-coverage": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", - "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@csstools/color-helpers": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", - "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - } - }, - "node_modules/@csstools/css-calc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", - "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-color-parser": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", - "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/color-helpers": "^5.1.0", - "@csstools/css-calc": "^2.1.4" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-parser-algorithms": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", - "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": 
"MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-tokenizer": "^3.0.4" - } - }, - "node_modules/@csstools/css-tokenizer": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", - "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/@emotion/babel-plugin": { - "version": "11.13.5", - "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz", - "integrity": "sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/runtime": "^7.18.3", - "@emotion/hash": "^0.9.2", - "@emotion/memoize": "^0.9.0", - "@emotion/serialize": "^1.3.3", - "babel-plugin-macros": "^3.1.0", - "convert-source-map": "^1.5.0", - "escape-string-regexp": "^4.0.0", - "find-root": "^1.1.0", - "source-map": "^0.5.7", - "stylis": "4.2.0" - } - }, - "node_modules/@emotion/babel-plugin/node_modules/@emotion/hash": { - "version": "0.9.2", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", - "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", - "license": "MIT" - }, - "node_modules/@emotion/babel-plugin/node_modules/stylis": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", - "license": "MIT" - }, - "node_modules/@emotion/cache": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz", - "integrity": "sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==", - "license": "MIT", - "dependencies": { - "@emotion/memoize": "^0.9.0", - "@emotion/sheet": "^1.4.0", - "@emotion/utils": "^1.4.2", - "@emotion/weak-memoize": "^0.4.0", - "stylis": "4.2.0" - } - }, - "node_modules/@emotion/cache/node_modules/stylis": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", - "license": "MIT" - }, - "node_modules/@emotion/css": { - "version": "11.13.5", - "resolved": "https://registry.npmjs.org/@emotion/css/-/css-11.13.5.tgz", - "integrity": "sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==", - "license": "MIT", - "dependencies": { - "@emotion/babel-plugin": "^11.13.5", - "@emotion/cache": "^11.13.5", - "@emotion/serialize": "^1.3.3", - "@emotion/sheet": "^1.4.0", - "@emotion/utils": "^1.4.2" - } - }, - "node_modules/@emotion/hash": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", - "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", - "license": "MIT" - }, - "node_modules/@emotion/memoize": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz", - "integrity": 
"sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==", - "license": "MIT" - }, - "node_modules/@emotion/react": { - "version": "11.14.0", - "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz", - "integrity": "sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.18.3", - "@emotion/babel-plugin": "^11.13.5", - "@emotion/cache": "^11.14.0", - "@emotion/serialize": "^1.3.3", - "@emotion/use-insertion-effect-with-fallbacks": "^1.2.0", - "@emotion/utils": "^1.4.2", - "@emotion/weak-memoize": "^0.4.0", - "hoist-non-react-statics": "^3.3.1" - }, - "peerDependencies": { - "react": ">=16.8.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@emotion/serialize": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz", - "integrity": "sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==", - "license": "MIT", - "dependencies": { - "@emotion/hash": "^0.9.2", - "@emotion/memoize": "^0.9.0", - "@emotion/unitless": "^0.10.0", - "@emotion/utils": "^1.4.2", - "csstype": "^3.0.2" - } - }, - "node_modules/@emotion/serialize/node_modules/@emotion/hash": { - "version": "0.9.2", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", - "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", - "license": "MIT" - }, - "node_modules/@emotion/serialize/node_modules/@emotion/unitless": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz", - "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==", - "license": "MIT" - }, - "node_modules/@emotion/sheet": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz", - "integrity": "sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==", - "license": "MIT" - }, - "node_modules/@emotion/unitless": { - "version": "0.7.5", - "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", - "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", - "license": "MIT" - }, - "node_modules/@emotion/use-insertion-effect-with-fallbacks": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz", - "integrity": "sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==", - "license": "MIT", - "peerDependencies": { - "react": ">=16.8.0" - } - }, - "node_modules/@emotion/utils": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz", - "integrity": "sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==", - "license": "MIT" - }, - "node_modules/@emotion/weak-memoize": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz", - "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", - "license": "MIT" - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", - "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", - "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", - "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", - "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", - "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", - "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", - "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", - "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", - "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", - "cpu": [ - "arm" - ], - "dev": true, - 
"license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", - "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", - "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", - "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", - "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", - "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", - "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", - "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", - "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", - "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", - "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", - "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", - "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", - "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", - "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", - "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "node_modules/@eslint-community/regexpp": { - "version": "4.12.2", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", - "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.0.0 || ^14.0.0 || >=16.0.0" - } - }, - "node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", - "integrity": 
"sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@eslint/eslintrc/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@eslint/js": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", - "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - } - }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.13.0", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", - "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", - "deprecated": "Use @eslint/config-array instead", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@humanwhocodes/object-schema": "^2.0.3", - "debug": "^4.3.1", - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=10.10.0" - } - }, - "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.22" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "node_modules/@humanwhocodes/object-schema": { - 
"version": "2.0.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", - "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", - "deprecated": "Use @eslint/object-schema instead", - "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/@intlify/core-base": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.14.5.tgz", - "integrity": "sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==", - "license": "MIT", - "dependencies": { - "@intlify/message-compiler": "9.14.5", - "@intlify/shared": "9.14.5" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - } - }, - "node_modules/@intlify/message-compiler": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.14.5.tgz", - "integrity": "sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==", - "license": "MIT", - "dependencies": { - "@intlify/shared": "9.14.5", - "source-map-js": "^1.0.2" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - } - }, - "node_modules/@intlify/shared": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.14.5.tgz", - "integrity": "sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==", - "license": "MIT", - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - } - }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@jridgewell/gen-mapping": { - "version": 
"0.3.13", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", - "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.0", - "@jridgewell/trace-mapping": "^0.3.24" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", - "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", - "license": "MIT" - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.31", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", - "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "node_modules/@kurkle/color": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz", - "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", - "license": "MIT" - }, - "node_modules/@lobehub/icons": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@lobehub/icons/-/icons-4.0.2.tgz", - "integrity": "sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==", - "license": "MIT", - "workspaces": [ - "packages/*" - ], - "dependencies": { - "antd-style": "^4.1.0", - "lucide-react": "^0.469.0", - "polished": "^4.3.1" - }, - "peerDependencies": { - "@lobehub/ui": "^4.3.3", - "antd": "^6.1.1", - "react": "^19.0.0", - "react-dom": "^19.0.0" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@one-ini/wasm": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", - "integrity": 
"sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, - "node_modules/@rc-component/util": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@rc-component/util/-/util-1.7.0.tgz", - "integrity": "sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==", - "license": "MIT", - "dependencies": { - "is-mobile": "^5.0.0", - "react-is": "^18.2.0" - }, - "peerDependencies": { - "react": ">=18.0.0", - "react-dom": ">=18.0.0" - } - }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", - "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", - "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", - "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", - "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", - "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", - "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", - "integrity": 
"sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", - "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", - "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", - "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", - "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", - "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", - "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", - "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", - "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - 
"node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", - "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", - "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", - "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", - "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", - "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ] - }, - "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", - "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", - "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", - "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", - "integrity": 
"sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", - "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@types/estree": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/file-saver": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@types/file-saver/-/file-saver-2.0.7.tgz", - "integrity": "sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/mdx": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", - "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "20.19.27", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.27.tgz", - "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/@types/parse-json": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", - "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", - "license": "MIT" - }, - "node_modules/@types/web-bluetooth": { - "version": "0.0.20", - "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", - "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==", - "license": "MIT" - }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz", - "integrity": "sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/type-utils": "7.18.0", - "@typescript-eslint/utils": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "graphemer": "^1.4.0", - "ignore": "^5.3.1", - "natural-compare": "^1.4.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^7.0.0", - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser": { - "version": "7.18.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz", - "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/typescript-estree": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz", - "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/type-utils": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz", - "integrity": "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/typescript-estree": "7.18.0", - "@typescript-eslint/utils": "7.18.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/types": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz", - "integrity": "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz", - "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/visitor-keys": "7.18.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils": { - "version": "7.18.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.18.0.tgz", - "integrity": "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.18.0", - "@typescript-eslint/types": "7.18.0", - "@typescript-eslint/typescript-estree": "7.18.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.56.0" - } - }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz", - "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "7.18.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@ungap/structured-clone": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", - "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", - "dev": true, - "license": "ISC" - }, - "node_modules/@vitejs/plugin-vue": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-5.2.4.tgz", - "integrity": "sha512-7Yx/SXSOcQq5HiiV3orevHUFn+pmMB4cgbEkDYgnkUWb0WfeQ/wa2yFv6D5ICiCQOVpjA7vYDXrC7AGO8yjDHA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "peerDependencies": { - "vite": "^5.0.0 || ^6.0.0", - "vue": "^3.2.25" - } - }, - "node_modules/@vitest/coverage-v8": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-2.1.9.tgz", - "integrity": "sha512-Z2cOr0ksM00MpEfyVE8KXIYPEcBFxdbLSs56L8PO0QQMxt/6bDj45uQfxoc96v05KW3clk7vvgP0qfDit9DmfQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@ampproject/remapping": "^2.3.0", - "@bcoe/v8-coverage": "^0.2.3", - "debug": "^4.3.7", - "istanbul-lib-coverage": "^3.2.2", - "istanbul-lib-report": "^3.0.1", - "istanbul-lib-source-maps": "^5.0.6", - "istanbul-reports": "^3.1.7", - "magic-string": "^0.30.12", - "magicast": "^0.3.5", - "std-env": "^3.8.0", - "test-exclude": "^7.0.1", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@vitest/browser": "2.1.9", - "vitest": "2.1.9" - }, - "peerDependenciesMeta": { - "@vitest/browser": { - "optional": true - } - } - }, - "node_modules/@vitest/expect": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", - "integrity": "sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/mocker": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.9.tgz", - "integrity": 
"sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.9", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.12" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "node_modules/@vitest/mocker/node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/@vitest/pretty-format": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz", - "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/runner": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.9.tgz", - "integrity": "sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "2.1.9", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/snapshot": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.9.tgz", - "integrity": "sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "magic-string": "^0.30.12", - "pathe": "^1.1.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/spy": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.9.tgz", - "integrity": "sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^3.0.2" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/utils": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.9.tgz", - "integrity": "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "2.1.9", - "loupe": "^3.1.2", - "tinyrainbow": "^1.2.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@volar/language-core": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.15.tgz", - "integrity": "sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/source-map": "2.4.15" - } - }, - "node_modules/@volar/source-map": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-2.4.15.tgz", - "integrity": 
"sha512-CPbMWlUN6hVZJYGcU/GSoHu4EnCHiLaXI9n8c9la6RaI9W5JHX+NqG+GSQcB0JdC2FIBLdZJwGsfKyBB71VlTg==", - "dev": true, - "license": "MIT" - }, - "node_modules/@volar/typescript": { - "version": "2.4.15", - "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-2.4.15.tgz", - "integrity": "sha512-2aZ8i0cqPGjXb4BhkMsPYDkkuc2ZQ6yOpqwAuNwUoncELqoy5fRgOQtLR9gB0g902iS0NAkvpIzs27geVyVdPg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/language-core": "2.4.15", - "path-browserify": "^1.0.1", - "vscode-uri": "^3.0.8" - } - }, - "node_modules/@vue/compiler-core": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.26.tgz", - "integrity": "sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@vue/shared": "3.5.26", - "entities": "^7.0.0", - "estree-walker": "^2.0.2", - "source-map-js": "^1.2.1" - } - }, - "node_modules/@vue/compiler-dom": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.26.tgz", - "integrity": "sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==", - "license": "MIT", - "dependencies": { - "@vue/compiler-core": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/compiler-sfc": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.26.tgz", - "integrity": "sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.28.5", - "@vue/compiler-core": "3.5.26", - "@vue/compiler-dom": "3.5.26", - "@vue/compiler-ssr": "3.5.26", - "@vue/shared": "3.5.26", - "estree-walker": "^2.0.2", - "magic-string": "^0.30.21", - "postcss": "^8.5.6", - "source-map-js": "^1.2.1" - } - }, - "node_modules/@vue/compiler-ssr": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.26.tgz", - "integrity": "sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==", - "license": "MIT", - "dependencies": { - "@vue/compiler-dom": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/compiler-vue2": { - "version": "2.7.16", - "resolved": "https://registry.npmjs.org/@vue/compiler-vue2/-/compiler-vue2-2.7.16.tgz", - "integrity": "sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==", - "dev": true, - "license": "MIT", - "dependencies": { - "de-indent": "^1.0.2", - "he": "^1.2.0" - } - }, - "node_modules/@vue/devtools-api": { - "version": "6.6.4", - "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", - "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", - "license": "MIT" - }, - "node_modules/@vue/language-core": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/@vue/language-core/-/language-core-2.2.12.tgz", - "integrity": "sha512-IsGljWbKGU1MZpBPN+BvPAdr55YPkj2nB/TBNGNC32Vy2qLG25DYu/NBN2vNtZqdRbTRjaoYrahLrToim2NanA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/language-core": "2.4.15", - "@vue/compiler-dom": "^3.5.0", - "@vue/compiler-vue2": "^2.7.16", - "@vue/shared": "^3.5.0", - "alien-signals": "^1.0.3", - "minimatch": "^9.0.3", - "muggle-string": "^0.4.1", - "path-browserify": "^1.0.1" - 
}, - "peerDependencies": { - "typescript": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@vue/reactivity": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.26.tgz", - "integrity": "sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==", - "license": "MIT", - "dependencies": { - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/runtime-core": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.26.tgz", - "integrity": "sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==", - "license": "MIT", - "dependencies": { - "@vue/reactivity": "3.5.26", - "@vue/shared": "3.5.26" - } - }, - "node_modules/@vue/runtime-dom": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.26.tgz", - "integrity": "sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==", - "license": "MIT", - "dependencies": { - "@vue/reactivity": "3.5.26", - "@vue/runtime-core": "3.5.26", - "@vue/shared": "3.5.26", - "csstype": "^3.2.3" - } - }, - "node_modules/@vue/server-renderer": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.26.tgz", - "integrity": "sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==", - "license": "MIT", - "dependencies": { - "@vue/compiler-ssr": "3.5.26", - "@vue/shared": "3.5.26" - }, - "peerDependencies": { - "vue": "3.5.26" - } - }, - "node_modules/@vue/shared": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.26.tgz", - "integrity": "sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==", - "license": "MIT" - }, - "node_modules/@vue/test-utils": { - "version": "2.4.6", - "resolved": "https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.4.6.tgz", - "integrity": "sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==", - "dev": true, - "license": "MIT", - "dependencies": { - "js-beautify": "^1.14.9", - "vue-component-type-helpers": "^2.0.0" - } - }, - "node_modules/@vueuse/core": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-10.11.1.tgz", - "integrity": "sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==", - "license": "MIT", - "dependencies": { - "@types/web-bluetooth": "^0.0.20", - "@vueuse/metadata": "10.11.1", - "@vueuse/shared": "10.11.1", - "vue-demi": ">=0.14.8" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@vueuse/metadata": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-10.11.1.tgz", - "integrity": "sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@vueuse/shared": { - "version": "10.11.1", - "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-10.11.1.tgz", - "integrity": "sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==", - "license": "MIT", - "dependencies": { - "vue-demi": ">=0.14.8" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" 
- } - }, - "node_modules/abbrev": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", - "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/adler-32": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/adler-32/-/adler-32-1.3.1.tgz", - "integrity": "sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/agent-base": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", - "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/alien-signals": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-1.0.13.tgz", - "integrity": "sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==", - "dev": true, - "license": "MIT" - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/antd-style": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/antd-style/-/antd-style-4.1.0.tgz", - "integrity": "sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==", - "license": "MIT", - "dependencies": { - 
"@ant-design/cssinjs": "^2.0.0", - "@babel/runtime": "^7.24.1", - "@emotion/cache": "^11.11.0", - "@emotion/css": "^11.11.2", - "@emotion/react": "^11.11.4", - "@emotion/serialize": "^1.1.3", - "@emotion/utils": "^1.2.1", - "use-merge-value": "^1.2.0" - }, - "peerDependencies": { - "antd": ">=6.0.0", - "react": ">=18" - } - }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "dev": true, - "license": "MIT" - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "dev": true, - "license": "ISC", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "dev": true, - "license": "MIT" - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0" - }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/assertion-error": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "license": "MIT" - }, - "node_modules/autoprefixer": { - "version": "10.4.23", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", - "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "browserslist": "^4.28.1", - "caniuse-lite": "^1.0.30001760", - "fraction.js": "^5.3.4", - "picocolors": "^1.1.1", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/axios": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", - "integrity": 
"sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", - "license": "MIT", - "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.4", - "proxy-from-env": "^1.1.0" - } - }, - "node_modules/babel-plugin-macros": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", - "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5", - "cosmiconfig": "^7.0.0", - "resolve": "^1.19.0" - }, - "engines": { - "node": ">=10", - "npm": ">=6" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/baseline-browser-mapping": { - "version": "2.9.14", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz", - "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "baseline-browser-mapping": "dist/cli.js" - } - }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", - "dev": true, - "license": "ISC" - }, - "node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.28.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", - "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "baseline-browser-mapping": "^2.9.0", - "caniuse-lite": "^1.0.30001759", - "electron-to-chromium": "^1.5.263", - "node-releases": "^2.0.27", - "update-browserslist-db": "^1.2.0" - }, - 
"bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/camelcase-css": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001763", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001763.tgz", - "integrity": "sha512-mh/dGtq56uN98LlNX9qdbKnzINhX0QzhiWBFEkFfsFO4QyCvL8YegrJAazCwXIeqkIob8BlZPGM3xdnY+sgmvQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "CC-BY-4.0" - }, - "node_modules/cfb": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cfb/-/cfb-1.2.2.tgz", - "integrity": "sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==", - "license": "Apache-2.0", - "dependencies": { - "adler-32": "~1.3.0", - "crc-32": "~1.2.0" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/chai": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", - "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chart.js": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz", - "integrity": 
"sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", - "license": "MIT", - "dependencies": { - "@kurkle/color": "^0.3.0" - }, - "engines": { - "pnpm": ">=8" - } - }, - "node_modules/check-error": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", - "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, - "node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "dev": true, - "license": "MIT", - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/clsx": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", - "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/codepage": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/codepage/-/codepage-1.15.0.tgz", - "integrity": "sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, - "license": "MIT" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - 
"node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true, - "license": "MIT" - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/convert-source-map": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", - "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", - "license": "MIT" - }, - "node_modules/cosmiconfig": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", - "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/crc-32": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", - "integrity": "sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==", - "license": "Apache-2.0", - "bin": { - "crc32": "bin/crc32.njs" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "dev": true, - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssstyle": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", - "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@asamuzakjp/css-color": "^3.2.0", - "rrweb-cssom": "^0.8.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/cssstyle/node_modules/rrweb-cssom": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", - "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", - "dev": true, - "license": "MIT" - }, - "node_modules/csstype": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", - "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", - "license": "MIT" - }, - "node_modules/data-urls": { - "version": "5.0.0", - 
"resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", - "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^14.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/de-indent": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", - "integrity": "sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==", - "dev": true, - "license": "MIT" - }, - "node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decimal.js": { - "version": "10.6.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", - "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", - "dev": true, - "license": "MIT" - }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dlv": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", - "dev": true, - "license": "MIT" - }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/driver.js": { 
- "version": "1.4.0", - "resolved": "https://registry.npmjs.org/driver.js/-/driver.js-1.4.0.tgz", - "integrity": "sha512-Gm64jm6PmcU+si21sQhBrTAM1JvUrR0QhNmjkprNLxohOBzul9+pNHXgQaT9lW84gwg9GMLB3NZGuGolsz5uew==", - "license": "MIT" - }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "dev": true, - "license": "MIT" - }, - "node_modules/editorconfig": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-1.0.4.tgz", - "integrity": "sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@one-ini/wasm": "0.1.1", - "commander": "^10.0.0", - "minimatch": "9.0.1", - "semver": "^7.5.3" - }, - "bin": { - "editorconfig": "bin/editorconfig" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/editorconfig/node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - } - }, - "node_modules/editorconfig/node_modules/minimatch": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.1.tgz", - "integrity": "sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/electron-to-chromium": { - "version": "1.5.267", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", - "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", - "dev": true, - "license": "ISC" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true, - "license": "MIT" - }, - "node_modules/entities": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.0.tgz", - "integrity": "sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", - "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", - "license": "MIT", - "dependencies": { - 
"is-arrayish": "^0.2.1" - } - }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", - "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", - "dev": true, - "license": "MIT" - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-set-tostringtag": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.6", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/esbuild": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", - "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", - "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", - "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - "@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": 
"sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint": { - "version": "8.57.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", - "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", - "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.1", - "@humanwhocodes/config-array": "^0.13.0", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "@ungap/structured-clone": "^1.2.0", - "ajv": "^6.12.4", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.2", - "eslint-visitor-keys": "^3.4.3", - "espree": "^9.6.1", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "graphemer": "^1.4.0", - "ignore": "^5.2.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-yaml": "^4.1.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "bin": { - "eslint": "bin/eslint.js" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-plugin-vue": { - "version": "9.33.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.33.0.tgz", - "integrity": "sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "globals": "^13.24.0", - "natural-compare": "^1.4.0", - "nth-check": "^2.1.1", - "postcss-selector-parser": "^6.0.15", - "semver": "^7.6.3", - "vue-eslint-parser": "^9.4.3", - "xml-name-validator": "^4.0.0" - }, - "engines": { - "node": "^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" - } - }, - "node_modules/eslint-scope": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", - "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - 
"url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/eslint/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/espree": { - "version": "9.6.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", - "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "acorn": "^8.9.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/esquery": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", - "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "license": "MIT" - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expect-type": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", - "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-glob": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", - "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.8" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true, - "license": "MIT" - }, - "node_modules/fastq": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", - "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", - "dev": true, - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", - "dev": true, - "license": "MIT", - "dependencies": { - "flat-cache": "^3.0.4" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/file-saver": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", - "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==", - "license": "MIT" - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-root": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", - "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", - "license": "MIT" - }, - "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "license": "MIT", - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/flat-cache": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", - "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", - "dev": true, - "license": "MIT", - "dependencies": { - "flatted": "^3.2.9", - "keyv": "^4.5.3", - "rimraf": "^3.0.2" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", - "dev": true, - "license": "ISC" - }, - "node_modules/follow-redirects": { - "version": "1.15.11", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", - "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/foreground-child": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", - "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", - "dev": true, - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.6", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/form-data": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", - "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", - "license": "MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "es-set-tostringtag": "^2.1.0", - "hasown": "^2.0.2", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/frac": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/frac/-/frac-1.1.2.tgz", - "integrity": "sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/fraction.js": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", - "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "*" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/rawify" - } - }, - "node_modules/fs.realpath": { - 
"version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true, - "license": "ISC" - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/glob/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/globals": { - "version": "13.24.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", - "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "type-fest": "^0.20.2" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/graphemer": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", - "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", - "dev": true, - "license": "MIT" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": 
"sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "dev": true, - "license": "MIT", - "bin": { - "he": "bin/he" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "license": "BSD-3-Clause", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, - "node_modules/html-encoding-sniffer": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", - "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-encoding": "^3.1.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true, - "license": "MIT" - }, - "node_modules/http-proxy-agent": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", - "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.0", - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/import-fresh": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", - "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", - "license": "MIT", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/imurmurhash": { - 
"version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "dev": true, - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true, - "license": "ISC" - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "license": "MIT" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "license": "MIT", - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-core-module": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", - "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/is-mobile": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-5.0.0.tgz", - "integrity": "sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==", - "license": "MIT" - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-potential-custom-element-name": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", - "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC" - }, - "node_modules/istanbul-lib-coverage": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", - "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-report": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", - "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^4.0.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-lib-source-maps": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", - "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.23", - "debug": "^4.1.1", - "istanbul-lib-coverage": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/istanbul-reports": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", - "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "html-escaper": "^2.0.0", - "istanbul-lib-report": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": 
true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/jiti": { - "version": "1.21.7", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", - "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", - "dev": true, - "license": "MIT", - "bin": { - "jiti": "bin/jiti.js" - } - }, - "node_modules/js-beautify": { - "version": "1.15.4", - "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.4.tgz", - "integrity": "sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==", - "dev": true, - "license": "MIT", - "dependencies": { - "config-chain": "^1.1.13", - "editorconfig": "^1.0.4", - "glob": "^10.4.2", - "js-cookie": "^3.0.5", - "nopt": "^7.2.1" - }, - "bin": { - "css-beautify": "js/bin/css-beautify.js", - "html-beautify": "js/bin/html-beautify.js", - "js-beautify": "js/bin/js-beautify.js" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/js-beautify/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/js-cookie": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", - "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "license": "MIT" - }, - "node_modules/js-yaml": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", - "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsdom": { - "version": "24.1.3", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-24.1.3.tgz", - "integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "cssstyle": "^4.0.1", - "data-urls": "^5.0.0", - "decimal.js": "^10.4.3", - "form-data": "^4.0.0", - "html-encoding-sniffer": "^4.0.0", - "http-proxy-agent": "^7.0.2", - "https-proxy-agent": "^7.0.5", - "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.12", - "parse5": "^7.1.2", - "rrweb-cssom": "^0.7.1", - "saxes": "^6.0.0", - "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.4", - "w3c-xmlserializer": "^5.0.0", - "webidl-conversions": "^7.0.0", - "whatwg-encoding": "^3.1.1", - "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^14.0.0", - "ws": 
"^8.18.0", - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "canvas": "^2.11.2" - }, - "peerDependenciesMeta": { - "canvas": { - "optional": true - } - } - }, - "node_modules/jsdom/node_modules/xml-name-validator": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", - "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/jsesc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", - "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "license": "MIT" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, - "license": "MIT" - }, - "node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dev": true, - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/lilconfig": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", - "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", 
- "license": "MIT" - }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/loupe": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", - "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/lucide-react": { - "version": "0.469.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.469.0.tgz", - "integrity": "sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==", - "license": "ISC", - "peerDependencies": { - "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" - } - }, - "node_modules/magic-string": { - "version": "0.30.21", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", - "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.5" - } - }, - "node_modules/magicast": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", - "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.25.4", - "@babel/types": "^7.25.4", - "source-map-js": "^1.2.0" - } - }, - "node_modules/make-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", - "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^7.5.3" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": 
"https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "license": "MIT" - }, - "node_modules/muggle-string": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", - "integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "node_modules/nanoid": { - "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/natural-compare": { - "version": "1.4.0", - "resolved": 
"https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true, - "license": "MIT" - }, - "node_modules/node-releases": { - "version": "2.0.27", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", - "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", - "dev": true, - "license": "MIT" - }, - "node_modules/nopt": { - "version": "7.2.1", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", - "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", - "dev": true, - "license": "ISC", - "dependencies": { - "abbrev": "^2.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/npm-run-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", - "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^4.0.0", - "unicorn-magic": "^0.3.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/nwsapi": { - "version": "2.2.23", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz", - "integrity": "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/once": { - "version": 
"1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/optionator": { - "version": "0.9.4", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", - "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", - "dev": true, - "license": "MIT", - "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.5" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "dev": true, - "license": "BlueOak-1.0.0" - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse5": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", - "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "entities": "^6.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5/node_modules/entities": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", - "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", - "dev": true, - "license": 
"BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/path-browserify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", - "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", - "dev": true, - "license": "MIT" - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "license": "MIT" - }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/pathe": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", - "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathval": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", - "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": 
"sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pinia": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.3.1.tgz", - "integrity": "sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==", - "license": "MIT", - "dependencies": { - "@vue/devtools-api": "^6.6.3", - "vue-demi": "^0.14.10" - }, - "funding": { - "url": "https://github.com/sponsors/posva" - }, - "peerDependencies": { - "typescript": ">=4.4.4", - "vue": "^2.7.0 || ^3.5.11" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/pirates": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", - "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/polished": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", - "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.17.8" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/postcss": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", - "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-import": { - "version": "15.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", - "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-js": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", - "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "camelcase-css": "^2.0.1" - }, - "engines": { - "node": "^12 || ^14 || >= 16" - }, - "peerDependencies": { - 
"postcss": "^8.4.21" - } - }, - "node_modules/postcss-load-config": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", - "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "lilconfig": "^3.1.1" - }, - "engines": { - "node": ">= 18" - }, - "peerDependencies": { - "jiti": ">=1.21.0", - "postcss": ">=8.0.9", - "tsx": "^4.8.1", - "yaml": "^2.4.2" - }, - "peerDependenciesMeta": { - "jiti": { - "optional": true - }, - "postcss": { - "optional": true - }, - "tsx": { - "optional": true - }, - "yaml": { - "optional": true - } - } - }, - "node_modules/postcss-nested": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", - "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.1.1" - }, - "engines": { - "node": ">=12.0" - }, - "peerDependencies": { - "postcss": "^8.2.14" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", - "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", - "dev": true, - "license": "ISC" - }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", - "license": "MIT" - }, - "node_modules/psl": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", - "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.3.1" - }, - "funding": { - "url": "https://github.com/sponsors/lupomontero" - } - }, - 
"node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "license": "MIT" - }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", - "dev": true, - "license": "MIT", - "dependencies": { - "pify": "^2.3.0" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, - "license": "MIT", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/resolve": { - "version": "1.22.11", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", - "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.16.1", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/reusify": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", - "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", - "dev": true, - "license": "MIT", - "engines": { - "iojs": 
">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/rollup": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", - "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.8" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.55.1", - "@rollup/rollup-android-arm64": "4.55.1", - "@rollup/rollup-darwin-arm64": "4.55.1", - "@rollup/rollup-darwin-x64": "4.55.1", - "@rollup/rollup-freebsd-arm64": "4.55.1", - "@rollup/rollup-freebsd-x64": "4.55.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", - "@rollup/rollup-linux-arm-musleabihf": "4.55.1", - "@rollup/rollup-linux-arm64-gnu": "4.55.1", - "@rollup/rollup-linux-arm64-musl": "4.55.1", - "@rollup/rollup-linux-loong64-gnu": "4.55.1", - "@rollup/rollup-linux-loong64-musl": "4.55.1", - "@rollup/rollup-linux-ppc64-gnu": "4.55.1", - "@rollup/rollup-linux-ppc64-musl": "4.55.1", - "@rollup/rollup-linux-riscv64-gnu": "4.55.1", - "@rollup/rollup-linux-riscv64-musl": "4.55.1", - "@rollup/rollup-linux-s390x-gnu": "4.55.1", - "@rollup/rollup-linux-x64-gnu": "4.55.1", - "@rollup/rollup-linux-x64-musl": "4.55.1", - "@rollup/rollup-openbsd-x64": "4.55.1", - "@rollup/rollup-openharmony-arm64": "4.55.1", - "@rollup/rollup-win32-arm64-msvc": "4.55.1", - "@rollup/rollup-win32-ia32-msvc": "4.55.1", - "@rollup/rollup-win32-x64-gnu": "4.55.1", - "@rollup/rollup-win32-x64-msvc": "4.55.1", - "fsevents": "~2.3.2" - } - }, - "node_modules/rrweb-cssom": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz", - "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==", - "dev": true, - "license": "MIT" - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true, - "license": "MIT" - }, - "node_modules/saxes": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", - "integrity": 
"sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", - "dev": true, - "license": "ISC", - "dependencies": { - "xmlchars": "^2.2.0" - }, - "engines": { - "node": ">=v12.22.7" - } - }, - "node_modules/semver": { - "version": "7.7.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", - "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true, - "license": "ISC" - }, - "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssf": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/ssf/-/ssf-0.11.2.tgz", - "integrity": "sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==", - "license": "Apache-2.0", - "dependencies": { - "frac": "~1.1.2" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true, - "license": "MIT" - }, - 
"node_modules/std-env": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", - "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "license": "MIT", - 
"engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/stylis": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", - "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", - "license": "MIT" - }, - "node_modules/sucrase": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", - "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.2", - "commander": "^4.0.0", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - "tinyglobby": "^0.2.11", - "ts-interface-checker": "^0.1.9" - }, - "bin": { - "sucrase": "bin/sucrase", - "sucrase-node": "bin/sucrase-node" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/symbol-tree": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", - "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", - "dev": true, - "license": "MIT" - }, - "node_modules/tailwindcss": { - "version": "3.4.19", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", - "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@alloc/quick-lru": "^5.2.0", - "arg": "^5.0.2", - "chokidar": "^3.6.0", - "didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.3.2", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "jiti": "^1.21.7", - "lilconfig": "^3.1.3", - "micromatch": "^4.0.8", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.1.1", - "postcss": "^8.4.47", - "postcss-import": "^15.1.0", - "postcss-js": "^4.0.1", - "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", - "postcss-nested": "^6.2.0", - "postcss-selector-parser": "^6.1.2", - "resolve": "^1.22.8", - "sucrase": "^3.35.0" - }, - "bin": { - "tailwind": "lib/cli.js", - "tailwindcss": "lib/cli.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/test-exclude": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", - "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", - "dev": true, - "license": "ISC", - "dependencies": { - "@istanbuljs/schema": "^0.1.2", - "glob": "^10.4.1", - "minimatch": "^9.0.4" - }, - "engines": { - "node": ">=18" 
- } - }, - "node_modules/test-exclude/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true, - "license": "MIT" - }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "dev": true, - "license": "MIT", - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "dev": true, - "license": "MIT", - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tiny-invariant": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", - "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinybench": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyexec": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", - "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyglobby": { - "version": "0.2.15", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", - "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "fdir": "^6.5.0", - "picomatch": "^4.0.3" - }, - "engines": { - "node": ">=12.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/SuperchupuDev" - } - }, - "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/tinypool": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", - "integrity": 
"sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/tinyrainbow": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", - "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", - "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/tr46": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", - "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.3.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/ts-api-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", - "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=16" - }, - "peerDependencies": { - "typescript": ">=4.2.0" - } - }, - "node_modules/ts-interface-checker": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/typescript": { - "version": 
"5.6.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", - "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/unicorn-magic": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", - "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", - "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "escalade": "^3.2.0", - "picocolors": "^1.1.1" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/use-merge-value": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-merge-value/-/use-merge-value-1.2.0.tgz", - "integrity": "sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==", - "license": "MIT", - "peerDependencies": { - "react": ">= 16.x" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true, - "license": "MIT" - }, - "node_modules/vite": { - "version": "5.4.21", - "resolved": 
"https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", - "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.21.3", - "postcss": "^8.4.43", - "rollup": "^4.20.0" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^18.0.0 || >=20.0.0", - "less": "*", - "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", - "terser": "^5.4.0" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - } - } - }, - "node_modules/vite-node": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.9.tgz", - "integrity": "sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.3.7", - "es-module-lexer": "^1.5.4", - "pathe": "^1.1.2", - "vite": "^5.0.0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/vite-plugin-checker": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/vite-plugin-checker/-/vite-plugin-checker-0.9.3.tgz", - "integrity": "sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.27.1", - "chokidar": "^4.0.3", - "npm-run-path": "^6.0.0", - "picocolors": "^1.1.1", - "picomatch": "^4.0.2", - "strip-ansi": "^7.1.0", - "tiny-invariant": "^1.3.3", - "tinyglobby": "^0.2.13", - "vscode-uri": "^3.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "peerDependencies": { - "@biomejs/biome": ">=1.7", - "eslint": ">=7", - "meow": "^13.2.0", - "optionator": "^0.9.4", - "stylelint": ">=16", - "typescript": "*", - "vite": ">=2.0.0", - "vls": "*", - "vti": "*", - "vue-tsc": "~2.2.10" - }, - "peerDependenciesMeta": { - "@biomejs/biome": { - "optional": true - }, - "eslint": { - "optional": true - }, - "meow": { - "optional": true - }, - "optionator": { - "optional": true - }, - "stylelint": { - "optional": true - }, - "typescript": { - "optional": true - }, - "vls": { - "optional": true - }, - "vti": { - "optional": true - }, - "vue-tsc": { - "optional": true - } - } - }, - "node_modules/vite-plugin-checker/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/vite-plugin-checker/node_modules/chokidar": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", - "integrity": 
"sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", - "dev": true, - "license": "MIT", - "dependencies": { - "readdirp": "^4.0.1" - }, - "engines": { - "node": ">= 14.16.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/vite-plugin-checker/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/vite-plugin-checker/node_modules/readdirp": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", - "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.18.0" - }, - "funding": { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/vite-plugin-checker/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/vitest": { - "version": "2.1.9", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.9.tgz", - "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/expect": "2.1.9", - "@vitest/mocker": "2.1.9", - "@vitest/pretty-format": "^2.1.9", - "@vitest/runner": "2.1.9", - "@vitest/snapshot": "2.1.9", - "@vitest/spy": "2.1.9", - "@vitest/utils": "2.1.9", - "chai": "^5.1.2", - "debug": "^4.3.7", - "expect-type": "^1.1.0", - "magic-string": "^0.30.12", - "pathe": "^1.1.2", - "std-env": "^3.8.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.1", - "tinypool": "^1.0.1", - "tinyrainbow": "^1.2.0", - "vite": "^5.0.0", - "vite-node": "2.1.9", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.1.9", - "@vitest/ui": "2.1.9", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "node_modules/vscode-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", - "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/vue": { - "version": "3.5.26", - "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.26.tgz", - "integrity": 
"sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==", - "license": "MIT", - "dependencies": { - "@vue/compiler-dom": "3.5.26", - "@vue/compiler-sfc": "3.5.26", - "@vue/runtime-dom": "3.5.26", - "@vue/server-renderer": "3.5.26", - "@vue/shared": "3.5.26" - }, - "peerDependencies": { - "typescript": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/vue-chartjs": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/vue-chartjs/-/vue-chartjs-5.3.3.tgz", - "integrity": "sha512-jqxtL8KZ6YJ5NTv6XzrzLS7osyegOi28UGNZW0h9OkDL7Sh1396ht4Dorh04aKrl2LiSalQ84WtqiG0RIJb0tA==", - "license": "MIT", - "peerDependencies": { - "chart.js": "^4.1.1", - "vue": "^3.0.0-0 || ^2.7.0" - } - }, - "node_modules/vue-component-type-helpers": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/vue-component-type-helpers/-/vue-component-type-helpers-2.2.12.tgz", - "integrity": "sha512-YbGqHZ5/eW4SnkPNR44mKVc6ZKQoRs/Rux1sxC6rdwXb4qpbOSYfDr9DsTHolOTGmIKgM9j141mZbBeg05R1pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/vue-demi": { - "version": "0.14.10", - "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.10.tgz", - "integrity": "sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==", - "hasInstallScript": true, - "license": "MIT", - "bin": { - "vue-demi-fix": "bin/vue-demi-fix.js", - "vue-demi-switch": "bin/vue-demi-switch.js" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - }, - "peerDependencies": { - "@vue/composition-api": "^1.0.0-rc.1", - "vue": "^3.0.0-0 || ^2.6.0" - }, - "peerDependenciesMeta": { - "@vue/composition-api": { - "optional": true - } - } - }, - "node_modules/vue-eslint-parser": { - "version": "9.4.3", - "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", - "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.3.4", - "eslint-scope": "^7.1.1", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.3.1", - "esquery": "^1.4.0", - "lodash": "^4.17.21", - "semver": "^7.3.6" - }, - "engines": { - "node": "^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - }, - "peerDependencies": { - "eslint": ">=6.0.0" - } - }, - "node_modules/vue-i18n": { - "version": "9.14.5", - "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-9.14.5.tgz", - "integrity": "sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==", - "deprecated": "v9 and v10 no longer supported. please migrate to v11. 
about maintenance status, see https://vue-i18n.intlify.dev/guide/maintenance.html", - "license": "MIT", - "dependencies": { - "@intlify/core-base": "9.14.5", - "@intlify/shared": "9.14.5", - "@vue/devtools-api": "^6.5.0" - }, - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/kazupon" - }, - "peerDependencies": { - "vue": "^3.0.0" - } - }, - "node_modules/vue-router": { - "version": "4.6.4", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", - "integrity": "sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==", - "license": "MIT", - "dependencies": { - "@vue/devtools-api": "^6.6.4" - }, - "funding": { - "url": "https://github.com/sponsors/posva" - }, - "peerDependencies": { - "vue": "^3.5.0" - } - }, - "node_modules/vue-tsc": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-2.2.12.tgz", - "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@volar/typescript": "2.4.15", - "@vue/language-core": "2.2.12" - }, - "bin": { - "vue-tsc": "bin/vue-tsc.js" - }, - "peerDependencies": { - "typescript": ">=5.0.0" - } - }, - "node_modules/w3c-xmlserializer": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", - "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", - "dev": true, - "license": "MIT", - "dependencies": { - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/w3c-xmlserializer/node_modules/xml-name-validator": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", - "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/webidl-conversions": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", - "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=12" - } - }, - "node_modules/whatwg-encoding": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", - "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", - "deprecated": "Use @exodus/bytes instead for a more spec-conformant and faster implementation", - "dev": true, - "license": "MIT", - "dependencies": { - "iconv-lite": "0.6.3" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/whatwg-mimetype": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", - "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/whatwg-url": { - "version": "14.2.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", - "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"tr46": "^5.1.0", - "webidl-conversions": "^7.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/why-is-node-running": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", - "dev": true, - "license": "MIT", - "dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wmf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wmf/-/wmf-1.0.2.tgz", - "integrity": "sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/word": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/word/-/word-0.3.0.tgz", - "integrity": "sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/word-wrap": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", - "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ws": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", - "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xlsx": { - "version": "0.18.5", - "resolved": "https://registry.npmjs.org/xlsx/-/xlsx-0.18.5.tgz", - "integrity": "sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==", - "license": "Apache-2.0", - "dependencies": { - "adler-32": "~1.3.0", - "cfb": "~1.2.1", - "codepage": "~1.15.0", - "crc-32": "~1.2.1", - "ssf": "~0.11.2", - "wmf": "~1.0.1", - "word": "~0.3.0" - }, - "bin": { - "xlsx": "bin/xlsx.njs" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/xml-name-validator": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", - "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12" - } - }, - "node_modules/xmlchars": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", - "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", - "dev": true, - "license": "MIT" - }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": 
"sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "license": "ISC", - "engines": { - "node": ">= 6" - } - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - } - } -} diff --git a/frontend/package.json b/frontend/package.json index c984cd96..38b92708 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -19,9 +19,12 @@ "@vueuse/core": "^10.7.0", "axios": "^1.6.2", "chart.js": "^4.4.1", + "dompurify": "^3.3.1", "driver.js": "^1.4.0", "file-saver": "^2.0.5", + "marked": "^17.0.1", "pinia": "^2.1.7", + "qrcode": "^1.5.4", "vue": "^3.4.0", "vue-chartjs": "^5.3.0", "vue-i18n": "^9.14.5", @@ -29,9 +32,11 @@ "xlsx": "^0.18.5" }, "devDependencies": { + "@types/dompurify": "^3.0.5", "@types/file-saver": "^2.0.7", "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", + "@types/qrcode": "^1.5.6", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "@vitejs/plugin-vue": "^5.2.3", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 1a808176..7dc73325 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -20,15 +20,24 @@ importers: chart.js: specifier: ^4.4.1 version: 4.5.1 + dompurify: + specifier: ^3.3.1 + version: 3.3.1 driver.js: specifier: ^1.4.0 version: 1.4.0 file-saver: specifier: ^2.0.5 version: 2.0.5 + marked: + specifier: ^17.0.1 + version: 17.0.1 pinia: specifier: ^2.1.7 version: 2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3)) + qrcode: + specifier: ^1.5.4 + version: 1.5.4 vue: specifier: ^3.4.0 version: 3.5.26(typescript@5.6.3) @@ -45,6 +54,9 @@ importers: specifier: ^0.18.5 version: 0.18.5 devDependencies: + '@types/dompurify': + specifier: ^3.0.5 + version: 3.2.0 '@types/file-saver': specifier: ^2.0.7 version: 2.0.7 @@ -54,6 +66,9 @@ importers: '@types/node': specifier: ^20.10.5 version: 20.19.27 + '@types/qrcode': + specifier: ^1.5.6 + version: 1.5.6 '@typescript-eslint/eslint-plugin': specifier: ^7.18.0 version: 7.18.0(@typescript-eslint/parser@7.18.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3) @@ -1239,56 +1254,67 @@ packages: resolution: {integrity: sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==} cpu: [arm] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.54.0': resolution: {integrity: sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==} cpu: [arm] os: [linux] + libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.54.0': resolution: {integrity: sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==} cpu: [arm64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.54.0': resolution: {integrity: sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==} cpu: [arm64] os: [linux] + libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.54.0': resolution: {integrity: sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==} cpu: [loong64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-ppc64-gnu@4.54.0': resolution: {integrity: 
sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-gnu@4.54.0': resolution: {integrity: sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==} cpu: [riscv64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.54.0': resolution: {integrity: sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==} cpu: [riscv64] os: [linux] + libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.54.0': resolution: {integrity: sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==} cpu: [s390x] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.54.0': resolution: {integrity: sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-musl@4.54.0': resolution: {integrity: sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==} cpu: [x64] os: [linux] + libc: [musl] '@rollup/rollup-openharmony-arm64@4.54.0': resolution: {integrity: sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==} @@ -1443,6 +1469,10 @@ packages: '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/dompurify@3.2.0': + resolution: {integrity: sha512-Fgg31wv9QbLDA0SpTOXO3MaxySc4DKGLi8sna4/Utjo4r3ZRPdCt4UQee8BWr+Q5z21yifghREPJGYaEOEIACg==} + deprecated: This is a stub types definition. dompurify provides its own type definitions, so you do not need this installed. + '@types/estree-jsx@1.0.5': resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} @@ -1479,6 +1509,9 @@ packages: '@types/parse-json@4.0.2': resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} + '@types/qrcode@1.5.6': + resolution: {integrity: sha512-te7NQcV2BOvdj2b1hCAHzAoMNuj65kNBMz0KBaxM6c3VGBOhU0dURQKOtH8CFNI/dsKkwlv32p26qYQTWoB5bw==} + '@types/react@19.2.7': resolution: {integrity: sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==} @@ -1832,6 +1865,10 @@ packages: resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} engines: {node: '>= 6'} + camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + caniuse-lite@1.0.30001761: resolution: {integrity: sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==} @@ -1895,6 +1932,9 @@ packages: classnames@2.5.1: resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} + cliui@6.0.0: + resolution: {integrity: sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==} + clsx@1.2.1: resolution: {integrity: sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==} engines: {node: '>=6'} @@ -2164,6 +2204,10 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: {integrity: 
sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + decimal.js@10.6.0: resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} @@ -2198,6 +2242,9 @@ packages: didyoumean@1.2.2: resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + dijkstrajs@1.0.3: + resolution: {integrity: sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA==} + dir-glob@3.0.1: resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} engines: {node: '>=8'} @@ -2424,6 +2471,10 @@ packages: find-root@1.1.0: resolution: {integrity: sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==} + find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + find-up@5.0.0: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} @@ -2488,6 +2539,10 @@ packages: function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + get-east-asian-width@1.4.0: resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} engines: {node: '>=18'} @@ -2856,6 +2911,10 @@ packages: lit@3.3.2: resolution: {integrity: sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==} + locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} @@ -3239,14 +3298,26 @@ packages: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} + p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + p-limit@3.1.0: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} + p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + p-locate@5.0.0: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} + p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + package-json-from-dist@1.0.1: resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} @@ -3341,6 +3412,10 @@ packages: pkg-types@1.3.1: resolution: {integrity: 
sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + pngjs@5.0.0: + resolution: {integrity: sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==} + engines: {node: '>=10.13.0'} + points-on-curve@0.2.0: resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} @@ -3421,6 +3496,11 @@ packages: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} + qrcode@1.5.4: + resolution: {integrity: sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg==} + engines: {node: '>=10.13.0'} + hasBin: true + query-string@9.3.1: resolution: {integrity: sha512-5fBfMOcDi5SA9qj5jZhWAcTtDfKF5WFdd2uD9nVNlbxVv1baq65aALy6qofpNEGELHvisjjasxQp7BlM9gvMzw==} engines: {node: '>=18'} @@ -3664,6 +3744,13 @@ packages: remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-main-filename@2.0.0: + resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} + requires-port@1.0.0: resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} @@ -3739,6 +3826,9 @@ packages: engines: {node: '>=10'} hasBin: true + set-blocking@2.0.0: + resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} + set-value@2.0.1: resolution: {integrity: sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==} engines: {node: '>=0.10.0'} @@ -4263,6 +4353,9 @@ packages: resolution: {integrity: sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==} engines: {node: '>=18'} + which-module@2.0.1: + resolution: {integrity: sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==} + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} @@ -4285,6 +4378,10 @@ packages: resolution: {integrity: sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==} engines: {node: '>=0.8'} + wrap-ansi@6.2.0: + resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} + engines: {node: '>=8'} + wrap-ansi@7.0.0: resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} engines: {node: '>=10'} @@ -4324,10 +4421,21 @@ packages: xmlchars@2.2.0: resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + y18n@4.0.3: + resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} + yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} + yargs-parser@18.1.3: + resolution: {integrity: 
sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==} + engines: {node: '>=6'} + + yargs@15.4.1: + resolution: {integrity: sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==} + engines: {node: '>=8'} + yocto-queue@0.1.0: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} @@ -5806,6 +5914,10 @@ snapshots: dependencies: '@types/ms': 2.1.0 + '@types/dompurify@3.2.0': + dependencies: + dompurify: 3.3.1 + '@types/estree-jsx@1.0.5': dependencies: '@types/estree': 1.0.8 @@ -5838,6 +5950,10 @@ snapshots: '@types/parse-json@4.0.2': {} + '@types/qrcode@1.5.6': + dependencies: + '@types/node': 20.19.27 + '@types/react@19.2.7': dependencies: csstype: 3.2.3 @@ -6321,6 +6437,8 @@ snapshots: camelcase-css@2.0.1: {} + camelcase@5.3.1: {} + caniuse-lite@1.0.30001761: {} ccount@2.0.1: {} @@ -6395,6 +6513,12 @@ snapshots: classnames@2.5.1: {} + cliui@6.0.0: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 6.2.0 + clsx@1.2.1: {} clsx@2.1.1: {} @@ -6668,6 +6792,8 @@ snapshots: dependencies: ms: 2.1.3 + decamelize@1.2.0: {} + decimal.js@10.6.0: {} decode-named-character-reference@1.2.0: @@ -6694,6 +6820,8 @@ snapshots: didyoumean@1.2.2: {} + dijkstrajs@1.0.3: {} + dir-glob@3.0.1: dependencies: path-type: 4.0.0 @@ -6978,6 +7106,11 @@ snapshots: find-root@1.1.0: {} + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + find-up@5.0.0: dependencies: locate-path: 6.0.0 @@ -7029,6 +7162,8 @@ snapshots: function-bind@1.1.2: {} + get-caller-file@2.0.5: {} + get-east-asian-width@1.4.0: {} get-intrinsic@1.3.0: @@ -7521,6 +7656,10 @@ snapshots: lit-element: 4.2.2 lit-html: 3.3.2 + locate-path@5.0.0: + dependencies: + p-locate: 4.1.0 + locate-path@6.0.0: dependencies: p-locate: 5.0.0 @@ -8194,14 +8333,24 @@ snapshots: type-check: 0.4.0 word-wrap: 1.2.5 + p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + p-locate@5.0.0: dependencies: p-limit: 3.1.0 + p-try@2.2.0: {} + package-json-from-dist@1.0.1: {} package-manager-detector@1.6.0: {} @@ -8284,6 +8433,8 @@ snapshots: mlly: 1.8.0 pathe: 2.0.3 + pngjs@5.0.0: {} + points-on-curve@0.2.0: {} points-on-path@0.2.1: @@ -8352,6 +8503,12 @@ snapshots: punycode@2.3.1: {} + qrcode@1.5.4: + dependencies: + dijkstrajs: 1.0.3 + pngjs: 5.0.0 + yargs: 15.4.1 + query-string@9.3.1: dependencies: decode-uri-component: 0.4.1 @@ -8703,6 +8860,10 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + require-directory@2.1.1: {} + + require-main-filename@2.0.0: {} + requires-port@1.0.0: {} reselect@5.1.1: {} @@ -8788,6 +8949,8 @@ snapshots: semver@7.7.3: {} + set-blocking@2.0.0: {} + set-value@2.0.1: dependencies: extend-shallow: 2.0.1 @@ -9298,6 +9461,8 @@ snapshots: tr46: 5.1.1 webidl-conversions: 7.0.0 + which-module@2.0.1: {} + which@2.0.2: dependencies: isexe: 2.0.0 @@ -9313,6 +9478,12 @@ snapshots: word@0.3.0: {} + wrap-ansi@6.2.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi@7.0.0: dependencies: ansi-styles: 4.3.0 @@ -9345,8 +9516,29 @@ snapshots: xmlchars@2.2.0: {} + y18n@4.0.3: {} + yaml@1.10.2: {} + yargs-parser@18.1.3: + dependencies: + camelcase: 5.3.1 + decamelize: 1.2.0 + + yargs@15.4.1: + dependencies: + cliui: 6.0.0 + decamelize: 1.2.0 + find-up: 4.1.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + 
require-main-filename: 2.0.0 + set-blocking: 2.0.0 + string-width: 4.2.3 + which-module: 2.0.1 + y18n: 4.0.3 + yargs-parser: 18.1.3 + yocto-queue@0.1.0: {} zustand@3.7.2(react@19.2.3): diff --git a/frontend/src/api/admin/announcements.ts b/frontend/src/api/admin/announcements.ts new file mode 100644 index 00000000..d02fdda7 --- /dev/null +++ b/frontend/src/api/admin/announcements.ts @@ -0,0 +1,71 @@ +/** + * Admin Announcements API endpoints + */ + +import { apiClient } from '../client' +import type { + Announcement, + AnnouncementUserReadStatus, + BasePaginationResponse, + CreateAnnouncementRequest, + UpdateAnnouncementRequest +} from '@/types' + +export async function list( + page: number = 1, + pageSize: number = 20, + filters?: { + status?: string + search?: string + } +): Promise> { + const { data } = await apiClient.get>('/admin/announcements', { + params: { page, page_size: pageSize, ...filters } + }) + return data +} + +export async function getById(id: number): Promise { + const { data } = await apiClient.get(`/admin/announcements/${id}`) + return data +} + +export async function create(request: CreateAnnouncementRequest): Promise { + const { data } = await apiClient.post('/admin/announcements', request) + return data +} + +export async function update(id: number, request: UpdateAnnouncementRequest): Promise { + const { data } = await apiClient.put(`/admin/announcements/${id}`, request) + return data +} + +export async function deleteAnnouncement(id: number): Promise<{ message: string }> { + const { data } = await apiClient.delete<{ message: string }>(`/admin/announcements/${id}`) + return data +} + +export async function getReadStatus( + id: number, + page: number = 1, + pageSize: number = 20, + search: string = '' +): Promise> { + const { data } = await apiClient.get>( + `/admin/announcements/${id}/read-status`, + { params: { page, page_size: pageSize, search } } + ) + return data +} + +const announcementsAPI = { + list, + getById, + create, + update, + delete: deleteAnnouncement, + getReadStatus +} + +export default announcementsAPI + diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts index 9b338788..ae48bec2 100644 --- a/frontend/src/api/admin/dashboard.ts +++ b/frontend/src/api/admin/dashboard.ts @@ -50,6 +50,7 @@ export interface TrendParams { account_id?: number group_id?: number stream?: boolean + billing_type?: number | null } export interface TrendResponse { @@ -78,6 +79,7 @@ export interface ModelStatsParams { account_id?: number group_id?: number stream?: boolean + billing_type?: number | null } export interface ModelStatsResponse { diff --git a/frontend/src/api/admin/groups.ts b/frontend/src/api/admin/groups.ts index 44eebc99..4d2b10ef 100644 --- a/frontend/src/api/admin/groups.ts +++ b/frontend/src/api/admin/groups.ts @@ -5,7 +5,7 @@ import { apiClient } from '../client' import type { - Group, + AdminGroup, GroupPlatform, CreateGroupRequest, UpdateGroupRequest, @@ -31,8 +31,8 @@ export async function list( options?: { signal?: AbortSignal } -): Promise> { - const { data } = await apiClient.get>('/admin/groups', { +): Promise> { + const { data } = await apiClient.get>('/admin/groups', { params: { page, page_size: pageSize, @@ -48,8 +48,8 @@ export async function list( * @param platform - Optional platform filter * @returns List of all active groups */ -export async function getAll(platform?: GroupPlatform): Promise { - const { data } = await apiClient.get('/admin/groups/all', { +export async function getAll(platform?: 
GroupPlatform): Promise { + const { data } = await apiClient.get('/admin/groups/all', { params: platform ? { platform } : undefined }) return data @@ -60,7 +60,7 @@ export async function getAll(platform?: GroupPlatform): Promise { * @param platform - Platform to filter by * @returns List of groups for the specified platform */ -export async function getByPlatform(platform: GroupPlatform): Promise { +export async function getByPlatform(platform: GroupPlatform): Promise { return getAll(platform) } @@ -69,8 +69,8 @@ export async function getByPlatform(platform: GroupPlatform): Promise { * @param id - Group ID * @returns Group details */ -export async function getById(id: number): Promise { - const { data } = await apiClient.get(`/admin/groups/${id}`) +export async function getById(id: number): Promise { + const { data } = await apiClient.get(`/admin/groups/${id}`) return data } @@ -79,8 +79,8 @@ export async function getById(id: number): Promise { * @param groupData - Group data * @returns Created group */ -export async function create(groupData: CreateGroupRequest): Promise { - const { data } = await apiClient.post('/admin/groups', groupData) +export async function create(groupData: CreateGroupRequest): Promise { + const { data } = await apiClient.post('/admin/groups', groupData) return data } @@ -90,8 +90,8 @@ export async function create(groupData: CreateGroupRequest): Promise { * @param updates - Fields to update * @returns Updated group */ -export async function update(id: number, updates: UpdateGroupRequest): Promise { - const { data } = await apiClient.put(`/admin/groups/${id}`, updates) +export async function update(id: number, updates: UpdateGroupRequest): Promise { + const { data } = await apiClient.put(`/admin/groups/${id}`, updates) return data } @@ -111,7 +111,7 @@ export async function deleteGroup(id: number): Promise<{ message: string }> { * @param status - New status * @returns Updated group */ -export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise { +export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise { return update(id, { status }) } diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index e86f6348..a88b02c6 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -10,6 +10,7 @@ import accountsAPI from './accounts' import proxiesAPI from './proxies' import redeemAPI from './redeem' import promoAPI from './promo' +import announcementsAPI from './announcements' import settingsAPI from './settings' import systemAPI from './system' import subscriptionsAPI from './subscriptions' @@ -30,6 +31,7 @@ export const adminAPI = { proxies: proxiesAPI, redeem: redeemAPI, promo: promoAPI, + announcements: announcementsAPI, settings: settingsAPI, system: systemAPI, subscriptions: subscriptionsAPI, @@ -48,6 +50,7 @@ export { proxiesAPI, redeemAPI, promoAPI, + announcementsAPI, settingsAPI, systemAPI, subscriptionsAPI, diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts index 11a98e46..a1c41e8c 100644 --- a/frontend/src/api/admin/ops.ts +++ b/frontend/src/api/admin/ops.ts @@ -781,6 +781,7 @@ export interface OpsAdvancedSettings { ignore_count_tokens_errors: boolean ignore_context_canceled: boolean ignore_no_available_accounts: boolean + ignore_invalid_api_key_errors: boolean auto_refresh_enabled: boolean auto_refresh_interval_seconds: number } diff --git a/frontend/src/api/admin/settings.ts b/frontend/src/api/admin/settings.ts index 
fc72be8d..a0595e4f 100644 --- a/frontend/src/api/admin/settings.ts +++ b/frontend/src/api/admin/settings.ts @@ -12,6 +12,10 @@ export interface SystemSettings { // Registration settings registration_enabled: boolean email_verify_enabled: boolean + promo_code_enabled: boolean + password_reset_enabled: boolean + totp_enabled: boolean // TOTP 双因素认证 + totp_encryption_key_configured: boolean // TOTP 加密密钥是否已配置 // Default settings default_balance: number default_concurrency: number @@ -23,6 +27,9 @@ export interface SystemSettings { contact_info: string doc_url: string home_content: string + hide_ccs_import_button: boolean + purchase_subscription_enabled: boolean + purchase_subscription_url: string // SMTP settings smtp_host: string smtp_port: number @@ -63,6 +70,9 @@ export interface SystemSettings { export interface UpdateSettingsRequest { registration_enabled?: boolean email_verify_enabled?: boolean + promo_code_enabled?: boolean + password_reset_enabled?: boolean + totp_enabled?: boolean // TOTP 双因素认证 default_balance?: number default_concurrency?: number site_name?: string @@ -72,6 +82,9 @@ export interface UpdateSettingsRequest { contact_info?: string doc_url?: string home_content?: string + hide_ccs_import_button?: boolean + purchase_subscription_enabled?: boolean + purchase_subscription_url?: string smtp_host?: string smtp_port?: number smtp_username?: string diff --git a/frontend/src/api/admin/subscriptions.ts b/frontend/src/api/admin/subscriptions.ts index 54b448e2..9f21056f 100644 --- a/frontend/src/api/admin/subscriptions.ts +++ b/frontend/src/api/admin/subscriptions.ts @@ -17,7 +17,7 @@ import type { * List all subscriptions with pagination * @param page - Page number (default: 1) * @param pageSize - Items per page (default: 20) - * @param filters - Optional filters (status, user_id, group_id) + * @param filters - Optional filters (status, user_id, group_id, sort_by, sort_order) * @returns Paginated list of subscriptions */ export async function list( @@ -27,6 +27,8 @@ export async function list( status?: 'active' | 'expired' | 'revoked' user_id?: number group_id?: number + sort_by?: string + sort_order?: 'asc' | 'desc' }, options?: { signal?: AbortSignal diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts index dd85fc24..94f7b57b 100644 --- a/frontend/src/api/admin/usage.ts +++ b/frontend/src/api/admin/usage.ts @@ -4,7 +4,7 @@ */ import { apiClient } from '../client' -import type { UsageLog, UsageQueryParams, PaginatedResponse } from '@/types' +import type { AdminUsageLog, UsageQueryParams, PaginatedResponse } from '@/types' // ==================== Types ==================== @@ -31,6 +31,46 @@ export interface SimpleApiKey { user_id: number } +export interface UsageCleanupFilters { + start_time: string + end_time: string + user_id?: number + api_key_id?: number + account_id?: number + group_id?: number + model?: string | null + stream?: boolean | null + billing_type?: number | null +} + +export interface UsageCleanupTask { + id: number + status: string + filters: UsageCleanupFilters + created_by: number + deleted_rows: number + error_message?: string | null + canceled_by?: number | null + canceled_at?: string | null + started_at?: string | null + finished_at?: string | null + created_at: string + updated_at: string +} + +export interface CreateUsageCleanupTaskRequest { + start_date: string + end_date: string + user_id?: number + api_key_id?: number + account_id?: number + group_id?: number + model?: string | null + stream?: boolean | null + billing_type?: 
number | null + timezone?: string +} + export interface AdminUsageQueryParams extends UsageQueryParams { user_id?: number } @@ -45,8 +85,8 @@ export interface AdminUsageQueryParams extends UsageQueryParams { export async function list( params: AdminUsageQueryParams, options?: { signal?: AbortSignal } -): Promise> { - const { data } = await apiClient.get>('/admin/usage', { +): Promise> { + const { data } = await apiClient.get>('/admin/usage', { params, signal: options?.signal }) @@ -108,11 +148,51 @@ export async function searchApiKeys(userId?: number, keyword?: string): Promise< return data } +/** + * List usage cleanup tasks (admin only) + * @param params - Query parameters for pagination + * @returns Paginated list of cleanup tasks + */ +export async function listCleanupTasks( + params: { page?: number; page_size?: number }, + options?: { signal?: AbortSignal } +): Promise> { + const { data } = await apiClient.get>('/admin/usage/cleanup-tasks', { + params, + signal: options?.signal + }) + return data +} + +/** + * Create a usage cleanup task (admin only) + * @param payload - Cleanup task parameters + * @returns Created cleanup task + */ +export async function createCleanupTask(payload: CreateUsageCleanupTaskRequest): Promise { + const { data } = await apiClient.post('/admin/usage/cleanup-tasks', payload) + return data +} + +/** + * Cancel a usage cleanup task (admin only) + * @param taskId - Task ID to cancel + */ +export async function cancelCleanupTask(taskId: number): Promise<{ id: number; status: string }> { + const { data } = await apiClient.post<{ id: number; status: string }>( + `/admin/usage/cleanup-tasks/${taskId}/cancel` + ) + return data +} + export const adminUsageAPI = { list, getStats, searchUsers, - searchApiKeys + searchApiKeys, + listCleanupTasks, + createCleanupTask, + cancelCleanupTask } export default adminUsageAPI diff --git a/frontend/src/api/admin/users.ts b/frontend/src/api/admin/users.ts index 44963cf9..734e3ac7 100644 --- a/frontend/src/api/admin/users.ts +++ b/frontend/src/api/admin/users.ts @@ -4,7 +4,7 @@ */ import { apiClient } from '../client' -import type { User, UpdateUserRequest, PaginatedResponse } from '@/types' +import type { AdminUser, UpdateUserRequest, PaginatedResponse } from '@/types' /** * List all users with pagination @@ -26,7 +26,7 @@ export async function list( options?: { signal?: AbortSignal } -): Promise> { +): Promise> { // Build params with attribute filters in attr[id]=value format const params: Record = { page, @@ -44,8 +44,7 @@ export async function list( } } } - - const { data } = await apiClient.get>('/admin/users', { + const { data } = await apiClient.get>('/admin/users', { params, signal: options?.signal }) @@ -57,8 +56,8 @@ export async function list( * @param id - User ID * @returns User details */ -export async function getById(id: number): Promise { - const { data } = await apiClient.get(`/admin/users/${id}`) +export async function getById(id: number): Promise { + const { data } = await apiClient.get(`/admin/users/${id}`) return data } @@ -73,8 +72,8 @@ export async function create(userData: { balance?: number concurrency?: number allowed_groups?: number[] | null -}): Promise { - const { data } = await apiClient.post('/admin/users', userData) +}): Promise { + const { data } = await apiClient.post('/admin/users', userData) return data } @@ -84,8 +83,8 @@ export async function create(userData: { * @param updates - Fields to update * @returns Updated user */ -export async function update(id: number, updates: UpdateUserRequest): 
Promise { - const { data } = await apiClient.put(`/admin/users/${id}`, updates) +export async function update(id: number, updates: UpdateUserRequest): Promise { + const { data } = await apiClient.put(`/admin/users/${id}`, updates) return data } @@ -112,8 +111,8 @@ export async function updateBalance( balance: number, operation: 'set' | 'add' | 'subtract' = 'set', notes?: string -): Promise { - const { data } = await apiClient.post(`/admin/users/${id}/balance`, { +): Promise { + const { data } = await apiClient.post(`/admin/users/${id}/balance`, { balance, operation, notes: notes || '' @@ -127,7 +126,7 @@ export async function updateBalance( * @param concurrency - New concurrency limit * @returns Updated user */ -export async function updateConcurrency(id: number, concurrency: number): Promise { +export async function updateConcurrency(id: number, concurrency: number): Promise { return update(id, { concurrency }) } @@ -137,7 +136,7 @@ export async function updateConcurrency(id: number, concurrency: number): Promis * @param status - New status * @returns Updated user */ -export async function toggleStatus(id: number, status: 'active' | 'disabled'): Promise { +export async function toggleStatus(id: number, status: 'active' | 'disabled'): Promise { return update(id, { status }) } diff --git a/frontend/src/api/announcements.ts b/frontend/src/api/announcements.ts new file mode 100644 index 00000000..a9034e2a --- /dev/null +++ b/frontend/src/api/announcements.ts @@ -0,0 +1,26 @@ +/** + * User Announcements API endpoints + */ + +import { apiClient } from './client' +import type { UserAnnouncement } from '@/types' + +export async function list(unreadOnly: boolean = false): Promise { + const { data } = await apiClient.get('/announcements', { + params: unreadOnly ? 
{ unread_only: 1 } : {}
+  })
+  return data
+}
+
+export async function markRead(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.post<{ message: string }>(`/announcements/${id}/read`)
+  return data
+}
+
+const announcementsAPI = {
+  list,
+  markRead
+}
+
+export default announcementsAPI
+
diff --git a/frontend/src/api/auth.ts b/frontend/src/api/auth.ts
index fddc23ef..bbd5ed74 100644
--- a/frontend/src/api/auth.ts
+++ b/frontend/src/api/auth.ts
@@ -11,9 +11,23 @@ import type {
   CurrentUserResponse,
   SendVerifyCodeRequest,
   SendVerifyCodeResponse,
-  PublicSettings
+  PublicSettings,
+  TotpLoginResponse,
+  TotpLogin2FARequest
 } from '@/types'
 
+/**
+ * Login response type - can be either full auth or 2FA required
+ */
+export type LoginResponse = AuthResponse | TotpLoginResponse
+
+/**
+ * Type guard to check if login response requires 2FA
+ */
+export function isTotp2FARequired(response: LoginResponse): response is TotpLoginResponse {
+  return 'requires_2fa' in response && response.requires_2fa === true
+}
+
 /**
  * Store authentication token in localStorage
  */
@@ -38,11 +52,28 @@ export function clearAuthToken(): void {
 
 /**
  * User login
- * @param credentials - Username and password
+ * @param credentials - Email and password
+ * @returns Authentication response with token and user data, or 2FA required response
+ */
+export async function login(credentials: LoginRequest): Promise<LoginResponse> {
+  const { data } = await apiClient.post<LoginResponse>('/auth/login', credentials)
+
+  // Only store token if 2FA is not required
+  if (!isTotp2FARequired(data)) {
+    setAuthToken(data.access_token)
+    localStorage.setItem('auth_user', JSON.stringify(data.user))
+  }
+
+  return data
+}
+
+/**
+ * Complete login with 2FA code
+ * @param request - Temp token and TOTP code
  * @returns Authentication response with token and user data
  */
-export async function login(credentials: LoginRequest): Promise<AuthResponse> {
-  const { data } = await apiClient.post<AuthResponse>('/auth/login', credentials)
+export async function login2FA(request: TotpLogin2FARequest): Promise<AuthResponse> {
+  const { data } = await apiClient.post<AuthResponse>('/auth/login/2fa', request)
 
   // Store token and user data
   setAuthToken(data.access_token)
@@ -133,8 +164,61 @@ export async function validatePromoCode(code: string): Promise
   return data
 }
 
+/**
+ * Forgot password request
+ */
+export interface ForgotPasswordRequest {
+  email: string
+}
+
+/**
+ * Forgot password response
+ */
+export interface ForgotPasswordResponse {
+  message: string
+}
+
+/**
+ * Request a password reset email
+ * @param request - Email address
+ * @returns Response with message
+ */
+export async function forgotPassword(request: ForgotPasswordRequest): Promise<ForgotPasswordResponse> {
+  const { data } = await apiClient.post<ForgotPasswordResponse>('/auth/forgot-password', request)
+  return data
+}
+
+/**
+ * Reset password request
+ */
+export interface ResetPasswordRequest {
+  email: string
+  token: string
+  new_password: string
+}
+
+/**
+ * Reset password response
+ */
+export interface ResetPasswordResponse {
+  message: string
+}
+
+/**
+ * Reset password with token
+ * @param request - Email, token, and new password
+ * @returns Response with message
+ */
+export async function resetPassword(request: ResetPasswordRequest): Promise<ResetPasswordResponse> {
+  const { data } = await apiClient.post<ResetPasswordResponse>('/auth/reset-password', request)
+  return data
+}
+
 export const authAPI = {
   login,
+  login2FA,
+  isTotp2FARequired,
   register,
   getCurrentUser,
   logout,
@@ -144,7 +228,9 @@
   clearAuthToken,
   getPublicSettings,
   sendVerifyCode,
-  validatePromoCode
+  validatePromoCode,
+  forgotPassword,
+  resetPassword
 }
 
 export default authAPI
diff --git a/frontend/src/api/index.ts b/frontend/src/api/index.ts
index 50b14c4c..070ce648 100644
--- a/frontend/src/api/index.ts
+++ b/frontend/src/api/index.ts
@@ -7,7 +7,7 @@
 export { apiClient } from './client'
 
 // Auth API
-export { authAPI } from './auth'
+export { authAPI, isTotp2FARequired, type LoginResponse } from './auth'
 
 // User APIs
 export { 
keysAPI } from './keys'
@@ -15,6 +15,8 @@ export { usageAPI } from './usage'
 export { userAPI } from './user'
 export { redeemAPI, type RedeemHistoryItem } from './redeem'
 export { userGroupsAPI } from './groups'
+export { totpAPI } from './totp'
+export { default as announcementsAPI } from './announcements'
 
 // Admin APIs
 export { adminAPI } from './admin'
diff --git a/frontend/src/api/redeem.ts b/frontend/src/api/redeem.ts
index 9e1c7d94..22abf4d8 100644
--- a/frontend/src/api/redeem.ts
+++ b/frontend/src/api/redeem.ts
@@ -14,7 +14,9 @@ export interface RedeemHistoryItem {
   status: string
   used_at: string
   created_at: string
-  // 订阅类型专用字段
+  // Notes from admin for admin_balance/admin_concurrency types
+  notes?: string
+  // Subscription-specific fields
   group_id?: number
   validity_days?: number
   group?: {
diff --git a/frontend/src/api/setup.ts b/frontend/src/api/setup.ts
index 8b744590..1097c95b 100644
--- a/frontend/src/api/setup.ts
+++ b/frontend/src/api/setup.ts
@@ -31,6 +31,7 @@ export interface RedisConfig {
   port: number
   password: string
   db: number
+  enable_tls: boolean
 }
 
 export interface AdminConfig {
diff --git a/frontend/src/api/totp.ts b/frontend/src/api/totp.ts
new file mode 100644
index 00000000..cd658acb
--- /dev/null
+++ b/frontend/src/api/totp.ts
@@ -0,0 +1,83 @@
+/**
+ * TOTP (2FA) API endpoints
+ * Handles Two-Factor Authentication with Google Authenticator
+ */
+
+import { apiClient } from './client'
+import type {
+  TotpStatus,
+  TotpSetupRequest,
+  TotpSetupResponse,
+  TotpEnableRequest,
+  TotpEnableResponse,
+  TotpDisableRequest,
+  TotpVerificationMethod
+} from '@/types'
+
+/**
+ * Get TOTP status for current user
+ * @returns TOTP status including enabled state and feature availability
+ */
+export async function getStatus(): Promise<TotpStatus> {
+  const { data } = await apiClient.get<TotpStatus>('/user/totp/status')
+  return data
+}
+
+/**
+ * Get verification method for TOTP operations
+ * @returns Method ('email' or 'password') required for setup/disable
+ */
+export async function getVerificationMethod(): Promise<TotpVerificationMethod> {
+  const { data } = await apiClient.get<TotpVerificationMethod>('/user/totp/verification-method')
+  return data
+}
+
+/**
+ * Send email verification code for TOTP operations
+ * @returns Success response
+ */
+export async function sendVerifyCode(): Promise<{ success: boolean }> {
+  const { data } = await apiClient.post<{ success: boolean }>('/user/totp/send-code')
+  return data
+}
+
+/**
+ * Initiate TOTP setup - generates secret and QR code
+ * @param request - Email code or password depending on verification method
+ * @returns Setup response with secret, QR code URL, and setup token
+ */
+export async function initiateSetup(request?: TotpSetupRequest): Promise<TotpSetupResponse> {
+  const { data } = await apiClient.post<TotpSetupResponse>('/user/totp/setup', request || {})
+  return data
+}
+
+/**
+ * Complete TOTP setup by verifying the code
+ * @param request - TOTP code and setup token
+ * @returns Enable response with success status and enabled timestamp
+ */
+export async function enable(request: TotpEnableRequest): Promise<TotpEnableResponse> {
+  const { data } = await apiClient.post<TotpEnableResponse>('/user/totp/enable', request)
+  return data
+}
+
+/**
+ * Disable TOTP for current user
+ * @param request - Email code or password depending on verification method
+ * @returns Success response
+ */
+export async function disable(request: TotpDisableRequest): Promise<{ success: boolean }> {
+  const { data } = await apiClient.post<{ success: boolean }>('/user/totp/disable', request)
+  return data
+}
+
+export const totpAPI = {
+  getStatus,
+  getVerificationMethod,
sendVerifyCode, + initiateSetup, + enable, + disable +} + +export default totpAPI diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 3c8aba97..9bf1d8dd 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -1,18 +1,32 @@ @@ -27,6 +28,7 @@ import StatCard from '@/components/common/StatCard.vue' import ProfileInfoCard from '@/components/user/profile/ProfileInfoCard.vue' import ProfileEditForm from '@/components/user/profile/ProfileEditForm.vue' import ProfilePasswordForm from '@/components/user/profile/ProfilePasswordForm.vue' +import ProfileTotpCard from '@/components/user/profile/ProfileTotpCard.vue' import { Icon } from '@/components/icons' const { t } = useI18n(); const authStore = useAuthStore(); const user = computed(() => authStore.user) diff --git a/frontend/src/views/user/PurchaseSubscriptionView.vue b/frontend/src/views/user/PurchaseSubscriptionView.vue new file mode 100644 index 00000000..55bcf307 --- /dev/null +++ b/frontend/src/views/user/PurchaseSubscriptionView.vue @@ -0,0 +1,121 @@ + + + + + + diff --git a/frontend/src/views/user/RedeemView.vue b/frontend/src/views/user/RedeemView.vue index 96158596..5850c084 100644 --- a/frontend/src/views/user/RedeemView.vue +++ b/frontend/src/views/user/RedeemView.vue @@ -312,6 +312,14 @@
           <p>
             {{ t('redeem.adminAdjustment') }}
           </p>
+
+          <p v-if="item.notes">
+            {{ item.notes }}
+          </p>
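Taken together, the auth.ts and index.ts hunks above turn sign-in into a two-step flow: login() returns either a full AuthResponse (token already persisted) or a TotpLoginResponse, and only the second case requires a follow-up call to login2FA(). A minimal consumer sketch, assuming TotpLoginResponse carries a temp_token field and TotpLogin2FARequest is { temp_token, code }; the diff shows only the type names, the requires_2fa discriminant, and the "Temp token and TOTP code" doc comment:

// Sketch of the two-step login flow; field names marked below are assumptions.
import { authAPI, isTotp2FARequired } from '@/api'

async function signIn(
  email: string,
  password: string,
  promptForCode: () => Promise<string>
) {
  const res = await authAPI.login({ email, password })
  if (!isTotp2FARequired(res)) {
    return res // full AuthResponse; login() already stored the token
  }
  // 2FA path: no token was stored yet, finish with the TOTP code
  const code = await promptForCode()
  return authAPI.login2FA({ temp_token: res.temp_token, code }) // field names assumed
}

The type guard is what lets TypeScript narrow LoginResponse on each branch, so callers cannot accidentally read access_token off a 2FA-pending response.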
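Similarly, the totp.ts endpoints imply a three-step enable flow: initiate setup, let the user scan the returned otpauth URL, then confirm with a code from the authenticator app. A sketch under assumed field names (otpauth_url and setup_token on TotpSetupResponse, code/setup_token on TotpEnableRequest, password on TotpSetupRequest), since the diff describes these only in prose; QR rendering uses the qrcode package added to package.json in this same series:

import QRCode from 'qrcode'
import { totpAPI } from '@/api'

async function enableTotp(
  password: string,
  showQr: (dataUrl: string) => void,
  readCode: () => Promise<string>
) {
  // 1. Start setup. Password verification is assumed here;
  //    getVerificationMethod() reports whether an email code is needed instead.
  const setup = await totpAPI.initiateSetup({ password }) // field name assumed

  // 2. Render the otpauth:// URL as a scannable QR image
  //    (qrcode's toDataURL resolves to a data: URI string).
  showQr(await QRCode.toDataURL(setup.otpauth_url)) // otpauth_url assumed

  // 3. Confirm with the 6-digit code from the authenticator app.
  return totpAPI.enable({
    code: await readCode(),        // field name assumed
    setup_token: setup.setup_token // field name assumed
  })
}

Keeping the setup token server-issued and short-lived means the secret never has to round-trip through client state between the two calls.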
diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json index a1731cfb..82ae3f9f 100644 --- a/frontend/tsconfig.json +++ b/frontend/tsconfig.json @@ -21,5 +21,6 @@ "types": ["vite/client"] }, "include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"], + "exclude": ["src/**/__tests__/**", "src/**/*.spec.ts", "src/**/*.test.ts"], "references": [{ "path": "./tsconfig.node.json" }] } diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index 267158ea..d88c6eed 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -1,4 +1,4 @@ -import { defineConfig, Plugin } from 'vite' +import { defineConfig, loadEnv, Plugin } from 'vite' import vue from '@vitejs/plugin-vue' import checker from 'vite-plugin-checker' import { resolve } from 'path' @@ -7,9 +7,7 @@ import { resolve } from 'path' * Vite 插件:开发模式下注入公开配置到 index.html * 与生产模式的后端注入行为保持一致,消除闪烁 */ -function injectPublicSettings(): Plugin { - const backendUrl = process.env.VITE_DEV_PROXY_TARGET || 'http://localhost:8080' - +function injectPublicSettings(backendUrl: string): Plugin { return { name: 'inject-public-settings', transformIndexHtml: { @@ -35,15 +33,21 @@ function injectPublicSettings(): Plugin { } } -export default defineConfig({ - plugins: [ - vue(), - checker({ - typescript: true, - vueTsc: true - }), - injectPublicSettings() - ], +export default defineConfig(({ mode }) => { + // 加载环境变量 + const env = loadEnv(mode, process.cwd(), '') + const backendUrl = env.VITE_DEV_PROXY_TARGET || 'http://localhost:8080' + const devPort = Number(env.VITE_DEV_PORT || 3000) + + return { + plugins: [ + vue(), + checker({ + typescript: true, + vueTsc: true + }), + injectPublicSettings(backendUrl) + ], resolve: { alias: { '@': resolve(__dirname, 'src'), @@ -102,17 +106,18 @@ export default defineConfig({ } } }, - server: { - host: '0.0.0.0', - port: Number(process.env.VITE_DEV_PORT || 3000), - proxy: { - '/api': { - target: process.env.VITE_DEV_PROXY_TARGET || 'http://localhost:8080', - changeOrigin: true - }, - '/setup': { - target: process.env.VITE_DEV_PROXY_TARGET || 'http://localhost:8080', - changeOrigin: true + server: { + host: '0.0.0.0', + port: devPort, + proxy: { + '/api': { + target: backendUrl, + changeOrigin: true + }, + '/setup': { + target: backendUrl, + changeOrigin: true + } } } } From 3ecadf4aad86c5befe5a72b0e033ab74ce8bcfcd Mon Sep 17 00:00:00 2001 From: song Date: Mon, 2 Feb 2026 22:20:08 +0800 Subject: [PATCH 083/214] chore: apply stashed changes --- backend/ent/group.go | 15 +- backend/ent/group/group.go | 5 + backend/ent/group_create.go | 57 ++++ backend/ent/group_update.go | 41 +++ backend/ent/migrate/schema.go | 1 + backend/ent/mutation.go | 72 ++++- backend/ent/runtime/runtime.go | 4 + backend/ent/schema/group.go | 6 + backend/go.mod | 18 +- backend/go.sum | 52 +++- backend/internal/domain/constants.go | 1 + .../internal/handler/admin/account_handler.go | 4 +- .../internal/handler/admin/group_handler.go | 6 + backend/internal/handler/dto/types.go | 2 + backend/internal/repository/api_key_repo.go | 2 + backend/internal/repository/group_repo.go | 7 +- backend/internal/service/admin_service.go | 10 + .../service/antigravity_gateway_service.go | 293 ++++++++++++++++++ .../service/antigravity_quota_scope.go | 16 + .../internal/service/api_key_auth_cache.go | 3 + .../service/api_key_auth_cache_impl.go | 2 + backend/internal/service/domain_constants.go | 1 + backend/internal/service/gateway_service.go | 38 +++ backend/internal/service/group.go | 4 + backend/internal/service/user.go | 5 + 
backend/internal/service/user_service.go | 2 +- .../046_add_group_supported_model_scopes.sql | 6 + docs/rename_local_migrations_20260202.sql | 27 ++ .../components/account/CreateAccountModal.vue | 136 +++++++- frontend/src/i18n/locales/en.ts | 21 +- frontend/src/i18n/locales/zh.ts | 19 ++ frontend/src/types/index.ts | 9 +- frontend/src/views/admin/GroupsView.vue | 140 +++++++++ 33 files changed, 997 insertions(+), 28 deletions(-) create mode 100644 backend/migrations/046_add_group_supported_model_scopes.sql create mode 100644 docs/rename_local_migrations_20260202.sql diff --git a/backend/ent/group.go b/backend/ent/group.go index d2b6af9f..1eb05e0e 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -64,6 +64,8 @@ type Group struct { ModelRoutingEnabled bool `json:"model_routing_enabled,omitempty"` // 是否注入 MCP XML 调用协议提示词(仅 antigravity 平台) McpXMLInject bool `json:"mcp_xml_inject,omitempty"` + // 支持的模型系列:claude, gemini_text, gemini_image + SupportedModelScopes []string `json:"supported_model_scopes,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the GroupQuery when eager-loading is set. Edges GroupEdges `json:"edges"` @@ -170,7 +172,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case group.FieldModelRouting: + case group.FieldModelRouting, group.FieldSupportedModelScopes: values[i] = new([]byte) case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject: values[i] = new(sql.NullBool) @@ -353,6 +355,14 @@ func (_m *Group) assignValues(columns []string, values []any) error { } else if value.Valid { _m.McpXMLInject = value.Bool } + case group.FieldSupportedModelScopes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field supported_model_scopes", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.SupportedModelScopes); err != nil { + return fmt.Errorf("unmarshal field supported_model_scopes: %w", err) + } + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -517,6 +527,9 @@ func (_m *Group) String() string { builder.WriteString(", ") builder.WriteString("mcp_xml_inject=") builder.WriteString(fmt.Sprintf("%v", _m.McpXMLInject)) + builder.WriteString(", ") + builder.WriteString("supported_model_scopes=") + builder.WriteString(fmt.Sprintf("%v", _m.SupportedModelScopes)) builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index aa9ff2ab..278b2daf 100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -61,6 +61,8 @@ const ( FieldModelRoutingEnabled = "model_routing_enabled" // FieldMcpXMLInject holds the string denoting the mcp_xml_inject field in the database. FieldMcpXMLInject = "mcp_xml_inject" + // FieldSupportedModelScopes holds the string denoting the supported_model_scopes field in the database. + FieldSupportedModelScopes = "supported_model_scopes" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. 
@@ -159,6 +161,7 @@ var Columns = []string{ FieldModelRouting, FieldModelRoutingEnabled, FieldMcpXMLInject, + FieldSupportedModelScopes, } var ( @@ -220,6 +223,8 @@ var ( DefaultModelRoutingEnabled bool // DefaultMcpXMLInject holds the default value on creation for the "mcp_xml_inject" field. DefaultMcpXMLInject bool + // DefaultSupportedModelScopes holds the default value on creation for the "supported_model_scopes" field. + DefaultSupportedModelScopes []string ) // OrderOption defines the ordering options for the Group queries. diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index b1ccc8e3..9d845b61 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -334,6 +334,12 @@ func (_c *GroupCreate) SetNillableMcpXMLInject(v *bool) *GroupCreate { return _c } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (_c *GroupCreate) SetSupportedModelScopes(v []string) *GroupCreate { + _c.mutation.SetSupportedModelScopes(v) + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { _c.mutation.AddAPIKeyIDs(ids...) @@ -511,6 +517,10 @@ func (_c *GroupCreate) defaults() error { v := group.DefaultMcpXMLInject _c.mutation.SetMcpXMLInject(v) } + if _, ok := _c.mutation.SupportedModelScopes(); !ok { + v := group.DefaultSupportedModelScopes + _c.mutation.SetSupportedModelScopes(v) + } return nil } @@ -572,6 +582,9 @@ func (_c *GroupCreate) check() error { if _, ok := _c.mutation.McpXMLInject(); !ok { return &ValidationError{Name: "mcp_xml_inject", err: errors.New(`ent: missing required field "Group.mcp_xml_inject"`)} } + if _, ok := _c.mutation.SupportedModelScopes(); !ok { + return &ValidationError{Name: "supported_model_scopes", err: errors.New(`ent: missing required field "Group.supported_model_scopes"`)} + } return nil } @@ -691,6 +704,10 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) _node.McpXMLInject = value } + if value, ok := _c.mutation.SupportedModelScopes(); ok { + _spec.SetField(group.FieldSupportedModelScopes, field.TypeJSON, value) + _node.SupportedModelScopes = value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1237,6 +1254,18 @@ func (u *GroupUpsert) UpdateMcpXMLInject() *GroupUpsert { return u } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (u *GroupUpsert) SetSupportedModelScopes(v []string) *GroupUpsert { + u.Set(group.FieldSupportedModelScopes, v) + return u +} + +// UpdateSupportedModelScopes sets the "supported_model_scopes" field to the value that was provided on create. +func (u *GroupUpsert) UpdateSupportedModelScopes() *GroupUpsert { + u.SetExcluded(group.FieldSupportedModelScopes) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -1737,6 +1766,20 @@ func (u *GroupUpsertOne) UpdateMcpXMLInject() *GroupUpsertOne { }) } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (u *GroupUpsertOne) SetSupportedModelScopes(v []string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetSupportedModelScopes(v) + }) +} + +// UpdateSupportedModelScopes sets the "supported_model_scopes" field to the value that was provided on create. 
+func (u *GroupUpsertOne) UpdateSupportedModelScopes() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateSupportedModelScopes() + }) +} + // Exec executes the query. func (u *GroupUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -2403,6 +2446,20 @@ func (u *GroupUpsertBulk) UpdateMcpXMLInject() *GroupUpsertBulk { }) } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (u *GroupUpsertBulk) SetSupportedModelScopes(v []string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetSupportedModelScopes(v) + }) +} + +// UpdateSupportedModelScopes sets the "supported_model_scopes" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateSupportedModelScopes() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateSupportedModelScopes() + }) +} + // Exec executes the query. func (u *GroupUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index 332ae52a..9e7246ea 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -10,6 +10,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/account" "github.com/Wei-Shaw/sub2api/ent/apikey" @@ -462,6 +463,18 @@ func (_u *GroupUpdate) SetNillableMcpXMLInject(v *bool) *GroupUpdate { return _u } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (_u *GroupUpdate) SetSupportedModelScopes(v []string) *GroupUpdate { + _u.mutation.SetSupportedModelScopes(v) + return _u +} + +// AppendSupportedModelScopes appends value to the "supported_model_scopes" field. +func (_u *GroupUpdate) AppendSupportedModelScopes(v []string) *GroupUpdate { + _u.mutation.AppendSupportedModelScopes(v) + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { _u.mutation.AddAPIKeyIDs(ids...) @@ -891,6 +904,14 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.McpXMLInject(); ok { _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) } + if value, ok := _u.mutation.SupportedModelScopes(); ok { + _spec.SetField(group.FieldSupportedModelScopes, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedSupportedModelScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, group.FieldSupportedModelScopes, value) + }) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1633,6 +1654,18 @@ func (_u *GroupUpdateOne) SetNillableMcpXMLInject(v *bool) *GroupUpdateOne { return _u } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (_u *GroupUpdateOne) SetSupportedModelScopes(v []string) *GroupUpdateOne { + _u.mutation.SetSupportedModelScopes(v) + return _u +} + +// AppendSupportedModelScopes appends value to the "supported_model_scopes" field. +func (_u *GroupUpdateOne) AppendSupportedModelScopes(v []string) *GroupUpdateOne { + _u.mutation.AppendSupportedModelScopes(v) + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) 
@@ -2092,6 +2125,14 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) if value, ok := _u.mutation.McpXMLInject(); ok { _spec.SetField(group.FieldMcpXMLInject, field.TypeBool, value) } + if value, ok := _u.mutation.SupportedModelScopes(); ok { + _spec.SetField(group.FieldSupportedModelScopes, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedSupportedModelScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, group.FieldSupportedModelScopes, value) + }) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 434916f8..d0238545 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -322,6 +322,7 @@ var ( {Name: "model_routing", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, {Name: "model_routing_enabled", Type: field.TypeBool, Default: false}, {Name: "mcp_xml_inject", Type: field.TypeBool, Default: true}, + {Name: "supported_model_scopes", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, } // GroupsTable holds the schema information for the "groups" table. GroupsTable = &schema.Table{ diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index cb654b7b..c7812024 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -5542,6 +5542,8 @@ type GroupMutation struct { model_routing *map[string][]int64 model_routing_enabled *bool mcp_xml_inject *bool + supported_model_scopes *[]string + appendsupported_model_scopes []string clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -6843,6 +6845,57 @@ func (m *GroupMutation) ResetMcpXMLInject() { m.mcp_xml_inject = nil } +// SetSupportedModelScopes sets the "supported_model_scopes" field. +func (m *GroupMutation) SetSupportedModelScopes(s []string) { + m.supported_model_scopes = &s + m.appendsupported_model_scopes = nil +} + +// SupportedModelScopes returns the value of the "supported_model_scopes" field in the mutation. +func (m *GroupMutation) SupportedModelScopes() (r []string, exists bool) { + v := m.supported_model_scopes + if v == nil { + return + } + return *v, true +} + +// OldSupportedModelScopes returns the old "supported_model_scopes" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldSupportedModelScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSupportedModelScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSupportedModelScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSupportedModelScopes: %w", err) + } + return oldValue.SupportedModelScopes, nil +} + +// AppendSupportedModelScopes adds s to the "supported_model_scopes" field. +func (m *GroupMutation) AppendSupportedModelScopes(s []string) { + m.appendsupported_model_scopes = append(m.appendsupported_model_scopes, s...) +} + +// AppendedSupportedModelScopes returns the list of values that were appended to the "supported_model_scopes" field in this mutation. 
+func (m *GroupMutation) AppendedSupportedModelScopes() ([]string, bool) { + if len(m.appendsupported_model_scopes) == 0 { + return nil, false + } + return m.appendsupported_model_scopes, true +} + +// ResetSupportedModelScopes resets all changes to the "supported_model_scopes" field. +func (m *GroupMutation) ResetSupportedModelScopes() { + m.supported_model_scopes = nil + m.appendsupported_model_scopes = nil +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -7201,7 +7254,7 @@ func (m *GroupMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *GroupMutation) Fields() []string { - fields := make([]string, 0, 23) + fields := make([]string, 0, 24) if m.created_at != nil { fields = append(fields, group.FieldCreatedAt) } @@ -7271,6 +7324,9 @@ func (m *GroupMutation) Fields() []string { if m.mcp_xml_inject != nil { fields = append(fields, group.FieldMcpXMLInject) } + if m.supported_model_scopes != nil { + fields = append(fields, group.FieldSupportedModelScopes) + } return fields } @@ -7325,6 +7381,8 @@ func (m *GroupMutation) Field(name string) (ent.Value, bool) { return m.ModelRoutingEnabled() case group.FieldMcpXMLInject: return m.McpXMLInject() + case group.FieldSupportedModelScopes: + return m.SupportedModelScopes() } return nil, false } @@ -7380,6 +7438,8 @@ func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldModelRoutingEnabled(ctx) case group.FieldMcpXMLInject: return m.OldMcpXMLInject(ctx) + case group.FieldSupportedModelScopes: + return m.OldSupportedModelScopes(ctx) } return nil, fmt.Errorf("unknown Group field %s", name) } @@ -7550,6 +7610,13 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { } m.SetMcpXMLInject(v) return nil + case group.FieldSupportedModelScopes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSupportedModelScopes(v) + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -7860,6 +7927,9 @@ func (m *GroupMutation) ResetField(name string) error { case group.FieldMcpXMLInject: m.ResetMcpXMLInject() return nil + case group.FieldSupportedModelScopes: + m.ResetSupportedModelScopes() + return nil } return fmt.Errorf("unknown Group field %s", name) } diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 790c1489..8032dc58 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -341,6 +341,10 @@ func init() { groupDescMcpXMLInject := groupFields[19].Descriptor() // group.DefaultMcpXMLInject holds the default value on creation for the mcp_xml_inject field. group.DefaultMcpXMLInject = groupDescMcpXMLInject.Default.(bool) + // groupDescSupportedModelScopes is the schema descriptor for supported_model_scopes field. + groupDescSupportedModelScopes := groupFields[20].Descriptor() + // group.DefaultSupportedModelScopes holds the default value on creation for the supported_model_scopes field. + group.DefaultSupportedModelScopes = groupDescSupportedModelScopes.Default.([]string) promocodeFields := schema.PromoCode{}.Fields() _ = promocodeFields // promocodeDescCode is the schema descriptor for code field. 
diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index 020f9f57..8a3c1a90 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -115,6 +115,12 @@ func (Group) Fields() []ent.Field { field.Bool("mcp_xml_inject"). Default(true). Comment("是否注入 MCP XML 调用协议提示词(仅 antigravity 平台)"), + + // 支持的模型系列 (added by migration 046) + field.JSON("supported_model_scopes", []string{}). + Default([]string{"claude", "gemini_text", "gemini_image"}). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}). + Comment("支持的模型系列:claude, gemini_text, gemini_image"), } } diff --git a/backend/go.mod b/backend/go.mod index 329eddfc..9a36a0f1 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -4,6 +4,8 @@ go 1.25.6 require ( entgo.io/ent v0.14.5 + github.com/DATA-DOG/go-sqlmock v1.5.2 + github.com/dgraph-io/ristretto v0.2.0 github.com/gin-gonic/gin v1.9.1 github.com/golang-jwt/jwt/v5 v5.2.2 github.com/google/uuid v1.6.0 @@ -11,7 +13,10 @@ require ( github.com/gorilla/websocket v1.5.3 github.com/imroc/req/v3 v3.57.0 github.com/lib/pq v1.10.9 + github.com/pquerna/otp v1.5.0 github.com/redis/go-redis/v9 v9.17.2 + github.com/refraction-networking/utls v1.8.1 + github.com/robfig/cron/v3 v3.0.1 github.com/shirou/gopsutil/v4 v4.25.6 github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.11.1 @@ -25,6 +30,7 @@ require ( golang.org/x/sync v0.19.0 golang.org/x/term v0.38.0 gopkg.in/yaml.v3 v3.0.1 + modernc.org/sqlite v1.44.3 ) require ( @@ -36,6 +42,7 @@ require ( github.com/andybalholm/brotli v1.2.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/bytedance/sonic v1.9.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -46,7 +53,6 @@ require ( github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dgraph-io/ristretto v0.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v28.5.1+incompatible // indirect @@ -97,6 +103,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/ncruces/go-strftime v1.0.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect @@ -106,9 +113,8 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.57.1 // indirect - github.com/refraction-networking/utls v1.8.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -139,13 +145,15 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + 
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/mod v0.30.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.39.0 // indirect - golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/grpc v1.75.1 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + modernc.org/libc v1.67.6 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 415e73a7..371623ad 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -20,6 +20,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -53,6 +55,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -111,6 +115,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -121,6 +127,9 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= @@ -141,6 +150,7 @@ github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -199,6 +209,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -214,6 +226,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= @@ -224,6 +238,8 @@ github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4Vi github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkrMJI0pRUOCAo= github.com/refraction-networking/utls v1.8.1/go.mod 
h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -336,8 +352,8 @@ golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= @@ -363,8 +379,8 @@ golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= -golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -387,4 +403,32 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= +modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= +modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/gc/v3 
v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= +modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= +modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY= +modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/internal/domain/constants.go b/backend/internal/domain/constants.go index 4ecea9d8..586e6309 100644 --- a/backend/internal/domain/constants.go +++ b/backend/internal/domain/constants.go @@ -29,6 +29,7 @@ const ( AccountTypeOAuth = "oauth" // OAuth类型账号(full scope: profile + inference) AccountTypeSetupToken = "setup-token" // Setup Token类型账号(inference only scope) AccountTypeAPIKey = "apikey" // API Key类型账号 + AccountTypeUpstream = "upstream" // 上游透传类型账号(通过 Base URL + API Key 连接上游) ) // Redeem type constants diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index bbf5d026..6d42f726 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -84,7 +84,7 @@ type CreateAccountRequest struct { Name string `json:"name" binding:"required"` Notes *string `json:"notes"` Platform string `json:"platform" binding:"required"` - Type string `json:"type" binding:"required,oneof=oauth setup-token apikey"` + Type string `json:"type" binding:"required,oneof=oauth setup-token apikey upstream"` Credentials map[string]any `json:"credentials" binding:"required"` Extra map[string]any `json:"extra"` ProxyID *int64 `json:"proxy_id"` @@ -102,7 +102,7 @@ type CreateAccountRequest struct { type UpdateAccountRequest struct { Name string `json:"name"` Notes *string `json:"notes"` - Type string `json:"type" binding:"omitempty,oneof=oauth setup-token apikey"` + Type string `json:"type" binding:"omitempty,oneof=oauth setup-token apikey upstream"` Credentials map[string]any `json:"credentials"` Extra map[string]any `json:"extra"` ProxyID *int64 `json:"proxy_id"` diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index 32391418..9192fe45 100644 --- a/backend/internal/handler/admin/group_handler.go +++ 
b/backend/internal/handler/admin/group_handler.go @@ -45,6 +45,8 @@ type CreateGroupRequest struct { ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled bool `json:"model_routing_enabled"` MCPXMLInject *bool `json:"mcp_xml_inject"` + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes []string `json:"supported_model_scopes"` } // UpdateGroupRequest represents update group request @@ -70,6 +72,8 @@ type UpdateGroupRequest struct { ModelRouting map[string][]int64 `json:"model_routing"` ModelRoutingEnabled *bool `json:"model_routing_enabled"` MCPXMLInject *bool `json:"mcp_xml_inject"` + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes *[]string `json:"supported_model_scopes"` } // List handles listing all groups with pagination @@ -177,6 +181,7 @@ func (h *GroupHandler) Create(c *gin.Context) { ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, MCPXMLInject: req.MCPXMLInject, + SupportedModelScopes: req.SupportedModelScopes, }) if err != nil { response.ErrorFrom(c, err) @@ -221,6 +226,7 @@ func (h *GroupHandler) Update(c *gin.Context) { ModelRouting: req.ModelRouting, ModelRoutingEnabled: req.ModelRoutingEnabled, MCPXMLInject: req.MCPXMLInject, + SupportedModelScopes: req.SupportedModelScopes, }) if err != nil { response.ErrorFrom(c, err) diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 7b33e473..ab0b86fe 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -88,6 +88,8 @@ type AdminGroup struct { // MCP XML 协议注入(仅 antigravity 平台使用) MCPXMLInject bool `json:"mcp_xml_inject"` + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes []string `json:"supported_model_scopes"` AccountGroups []AccountGroup `json:"account_groups,omitempty"` AccountCount int64 `json:"account_count,omitempty"` } diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index e3580a67..59f13985 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -140,6 +140,7 @@ func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*se group.FieldModelRoutingEnabled, group.FieldModelRouting, group.FieldMcpXMLInject, + group.FieldSupportedModelScopes, ) }). 
Only(ctx) @@ -433,6 +434,7 @@ func groupEntityToService(g *dbent.Group) *service.Group { ModelRouting: g.ModelRouting, ModelRoutingEnabled: g.ModelRoutingEnabled, MCPXMLInject: g.McpXMLInject, + SupportedModelScopes: g.SupportedModelScopes, CreatedAt: g.CreatedAt, UpdatedAt: g.UpdatedAt, } diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 116e45a3..53624635 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -59,6 +59,9 @@ func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) er builder = builder.SetModelRouting(groupIn.ModelRouting) } + // 设置支持的模型系列(始终设置,空数组表示不限制) + builder = builder.SetSupportedModelScopes(groupIn.SupportedModelScopes) + created, err := builder.Save(ctx) if err == nil { groupIn.ID = created.ID @@ -89,7 +92,6 @@ func (r *groupRepository) GetByIDLite(ctx context.Context, id int64) (*service.G if err != nil { return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil) } - return groupEntityToService(m), nil } @@ -133,6 +135,9 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er builder = builder.ClearModelRouting() } + // 处理 SupportedModelScopes(始终设置,空数组表示不限制) + builder = builder.SetSupportedModelScopes(groupIn.SupportedModelScopes) + updated, err := builder.Save(ctx) if err != nil { return translatePersistenceError(err, service.ErrGroupNotFound, service.ErrGroupExists) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index d541c73a..1449070e 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -113,6 +113,8 @@ type CreateGroupInput struct { ModelRouting map[string][]int64 ModelRoutingEnabled bool // 是否启用模型路由 MCPXMLInject *bool + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes []string } type UpdateGroupInput struct { @@ -138,6 +140,8 @@ type UpdateGroupInput struct { ModelRouting map[string][]int64 ModelRoutingEnabled *bool // 是否启用模型路由 MCPXMLInject *bool + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes *[]string } type CreateAccountInput struct { @@ -613,6 +617,7 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn FallbackGroupIDOnInvalidRequest: fallbackOnInvalidRequest, ModelRouting: input.ModelRouting, MCPXMLInject: mcpXMLInject, + SupportedModelScopes: input.SupportedModelScopes, } if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err @@ -797,6 +802,11 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd group.MCPXMLInject = *input.MCPXMLInject } + // 支持的模型系列(仅 antigravity 平台使用) + if input.SupportedModelScopes != nil { + group.SupportedModelScopes = *input.SupportedModelScopes + } + if err := s.groupRepo.Update(ctx, group); err != nil { return nil, err } diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index dd52a559..2af9efdb 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -412,6 +412,11 @@ type TestConnectionResult struct { // TestConnection 测试 Antigravity 账号连接(非流式,无重试、无计费) // 支持 Claude 和 Gemini 两种协议,根据 modelID 前缀自动选择 func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account *Account, modelID string) (*TestConnectionResult, error) { + // 上游透传账号使用专用测试方法 + if account.Type == AccountTypeUpstream { + return 
s.testUpstreamConnection(ctx, account, modelID) + } + // 获取 token if s.tokenProvider == nil { return nil, errors.New("antigravity token provider not configured") @@ -506,6 +511,87 @@ func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account return nil, lastErr } +// testUpstreamConnection 测试上游透传账号连接 +func (s *AntigravityGatewayService) testUpstreamConnection(ctx context.Context, account *Account, modelID string) (*TestConnectionResult, error) { + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + apiKey := strings.TrimSpace(account.GetCredential("api_key")) + if baseURL == "" || apiKey == "" { + return nil, errors.New("upstream account missing base_url or api_key") + } + baseURL = strings.TrimSuffix(baseURL, "/") + + // 使用 Claude 模型进行测试 + if modelID == "" { + modelID = "claude-sonnet-4-20250514" + } + + // 构建最小测试请求 + testReq := map[string]any{ + "model": modelID, + "max_tokens": 1, + "messages": []map[string]any{ + {"role": "user", "content": "."}, + }, + } + requestBody, err := json.Marshal(testReq) + if err != nil { + return nil, fmt.Errorf("构建请求失败: %w", err) + } + + // 构建 HTTP 请求 + upstreamURL := baseURL + "/v1/messages" + req, err := http.NewRequestWithContext(ctx, http.MethodPost, upstreamURL, bytes.NewReader(requestBody)) + if err != nil { + return nil, fmt.Errorf("创建请求失败: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("x-api-key", apiKey) + req.Header.Set("anthropic-version", "2023-06-01") + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + log.Printf("[antigravity-Test-Upstream] account=%s url=%s", account.Name, upstreamURL) + + // 发送请求 + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + return nil, fmt.Errorf("请求失败: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + respBody, err := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + if err != nil { + return nil, fmt.Errorf("读取响应失败: %w", err) + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("API 返回 %d: %s", resp.StatusCode, string(respBody)) + } + + // 提取响应文本 + var respData map[string]any + text := "" + if json.Unmarshal(respBody, &respData) == nil { + if content, ok := respData["content"].([]any); ok && len(content) > 0 { + if block, ok := content[0].(map[string]any); ok { + if t, ok := block["text"].(string); ok { + text = t + } + } + } + } + + return &TestConnectionResult{ + Text: text, + MappedModel: modelID, + }, nil +} + // buildGeminiTestRequest 构建 Gemini 格式测试请求 // 使用最小 token 消耗:输入 "." 
+ maxOutputTokens: 1 func (s *AntigravityGatewayService) buildGeminiTestRequest(projectID, model string) ([]byte, error) { @@ -728,6 +814,11 @@ func isModelNotFoundError(statusCode int, body []byte) bool { // Forward 转发 Claude 协议请求(Claude → Gemini 转换) func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, body []byte) (*ForwardResult, error) { + // 上游透传账号直接转发,不走 OAuth token 刷新 + if account.Type == AccountTypeUpstream { + return s.ForwardUpstream(ctx, c, account, body) + } + startTime := time.Now() sessionID := getSessionID(c) prefix := logPrefix(sessionID, account.Name) @@ -1349,6 +1440,208 @@ func stripSignatureSensitiveBlocksFromClaudeRequest(req *antigravity.ClaudeReque return changed, nil } +// ForwardUpstream 透传请求到上游 Antigravity 服务 +// 用于 upstream 类型账号,直接使用 base_url + api_key 转发,不走 OAuth token +func (s *AntigravityGatewayService) ForwardUpstream(ctx context.Context, c *gin.Context, account *Account, body []byte) (*ForwardResult, error) { + startTime := time.Now() + sessionID := getSessionID(c) + prefix := logPrefix(sessionID, account.Name) + + // 获取上游配置 + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + apiKey := strings.TrimSpace(account.GetCredential("api_key")) + if baseURL == "" || apiKey == "" { + return nil, fmt.Errorf("upstream account missing base_url or api_key") + } + baseURL = strings.TrimSuffix(baseURL, "/") + + // 解析请求获取模型信息 + var claudeReq antigravity.ClaudeRequest + if err := json.Unmarshal(body, &claudeReq); err != nil { + return nil, fmt.Errorf("parse claude request: %w", err) + } + if strings.TrimSpace(claudeReq.Model) == "" { + return nil, fmt.Errorf("missing model") + } + originalModel := claudeReq.Model + billingModel := originalModel + + // 构建上游请求 URL + upstreamURL := baseURL + "/v1/messages" + + // 创建请求 + req, err := http.NewRequestWithContext(ctx, http.MethodPost, upstreamURL, bytes.NewReader(body)) + if err != nil { + return nil, fmt.Errorf("create upstream request: %w", err) + } + + // 设置请求头 + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("x-api-key", apiKey) // Claude API 兼容 + + // 透传 Claude 相关 headers + if v := c.GetHeader("anthropic-version"); v != "" { + req.Header.Set("anthropic-version", v) + } + if v := c.GetHeader("anthropic-beta"); v != "" { + req.Header.Set("anthropic-beta", v) + } + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // 发送请求 + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + log.Printf("%s upstream request failed: %v", prefix, err) + return nil, fmt.Errorf("upstream request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + // 处理错误响应 + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + // 429 错误时标记账号限流 + if resp.StatusCode == http.StatusTooManyRequests { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, AntigravityQuotaScopeClaude) + } + + // 透传上游错误 + c.Header("Content-Type", resp.Header.Get("Content-Type")) + c.Status(resp.StatusCode) + _, _ = c.Writer.Write(respBody) + + return &ForwardResult{ + Model: billingModel, + }, nil + } + + // 处理成功响应(流式/非流式) + var usage *ClaudeUsage + var firstTokenMs *int + + if claudeReq.Stream { + // 流式响应:透传 + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + 
c.Header("X-Accel-Buffering", "no") + c.Status(http.StatusOK) + + usage, firstTokenMs = s.streamUpstreamResponse(c, resp, startTime) + } else { + // 非流式响应:直接透传 + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read upstream response: %w", err) + } + + // 提取 usage + usage = s.extractClaudeUsage(respBody) + + c.Header("Content-Type", resp.Header.Get("Content-Type")) + c.Status(http.StatusOK) + _, _ = c.Writer.Write(respBody) + } + + // 构建计费结果 + duration := time.Since(startTime) + log.Printf("%s status=success duration_ms=%d", prefix, duration.Milliseconds()) + + return &ForwardResult{ + Model: billingModel, + Stream: claudeReq.Stream, + Duration: duration, + FirstTokenMs: firstTokenMs, + Usage: ClaudeUsage{ + InputTokens: usage.InputTokens, + OutputTokens: usage.OutputTokens, + CacheReadInputTokens: usage.CacheReadInputTokens, + CacheCreationInputTokens: usage.CacheCreationInputTokens, + }, + }, nil +} + +// streamUpstreamResponse 透传上游流式响应并提取 usage +func (s *AntigravityGatewayService) streamUpstreamResponse(c *gin.Context, resp *http.Response, startTime time.Time) (*ClaudeUsage, *int) { + usage := &ClaudeUsage{} + var firstTokenMs *int + var firstTokenRecorded bool + + scanner := bufio.NewScanner(resp.Body) + buf := make([]byte, 0, 64*1024) + scanner.Buffer(buf, 1024*1024) + + for scanner.Scan() { + line := scanner.Bytes() + + // 记录首 token 时间 + if !firstTokenRecorded && len(line) > 0 { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + firstTokenRecorded = true + } + + // 尝试从 message_delta 或 message_stop 事件提取 usage + if bytes.HasPrefix(line, []byte("data: ")) { + dataStr := bytes.TrimPrefix(line, []byte("data: ")) + var event map[string]any + if json.Unmarshal(dataStr, &event) == nil { + if u, ok := event["usage"].(map[string]any); ok { + if v, ok := u["input_tokens"].(float64); ok && int(v) > 0 { + usage.InputTokens = int(v) + } + if v, ok := u["output_tokens"].(float64); ok && int(v) > 0 { + usage.OutputTokens = int(v) + } + if v, ok := u["cache_read_input_tokens"].(float64); ok && int(v) > 0 { + usage.CacheReadInputTokens = int(v) + } + if v, ok := u["cache_creation_input_tokens"].(float64); ok && int(v) > 0 { + usage.CacheCreationInputTokens = int(v) + } + } + } + } + + // 透传行 + _, _ = c.Writer.Write(line) + _, _ = c.Writer.Write([]byte("\n")) + c.Writer.Flush() + } + + return usage, firstTokenMs +} + +// extractClaudeUsage 从非流式 Claude 响应提取 usage +func (s *AntigravityGatewayService) extractClaudeUsage(body []byte) *ClaudeUsage { + usage := &ClaudeUsage{} + var resp map[string]any + if json.Unmarshal(body, &resp) != nil { + return usage + } + if u, ok := resp["usage"].(map[string]any); ok { + if v, ok := u["input_tokens"].(float64); ok { + usage.InputTokens = int(v) + } + if v, ok := u["output_tokens"].(float64); ok { + usage.OutputTokens = int(v) + } + if v, ok := u["cache_read_input_tokens"].(float64); ok { + usage.CacheReadInputTokens = int(v) + } + if v, ok := u["cache_creation_input_tokens"].(float64); ok { + usage.CacheCreationInputTokens = int(v) + } + } + return usage +} + // ForwardGemini 转发 Gemini 协议请求 func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Context, account *Account, originalModel string, action string, stream bool, body []byte) (*ForwardResult, error) { startTime := time.Now() diff --git a/backend/internal/service/antigravity_quota_scope.go b/backend/internal/service/antigravity_quota_scope.go index 34cd9a4c..e1a0a1f2 100644 --- a/backend/internal/service/antigravity_quota_scope.go 
+++ b/backend/internal/service/antigravity_quota_scope.go @@ -1,6 +1,7 @@ package service import ( + "slices" "strings" "time" ) @@ -16,6 +17,21 @@ const ( AntigravityQuotaScopeGeminiImage AntigravityQuotaScope = "gemini_image" ) +// IsScopeSupported 检查给定的 scope 是否在分组支持的 scope 列表中 +func IsScopeSupported(supportedScopes []string, scope AntigravityQuotaScope) bool { + if len(supportedScopes) == 0 { + // 未配置时默认全部支持 + return true + } + supported := slices.Contains(supportedScopes, string(scope)) + return supported +} + +// ResolveAntigravityQuotaScope 根据模型名称解析配额域(导出版本) +func ResolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { + return resolveAntigravityQuotaScope(requestedModel) +} + // resolveAntigravityQuotaScope 根据模型名称解析配额域 func resolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { model := normalizeAntigravityModelName(requestedModel) diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go index 5cb2fbfb..b56e7cf3 100644 --- a/backend/internal/service/api_key_auth_cache.go +++ b/backend/internal/service/api_key_auth_cache.go @@ -44,6 +44,9 @@ type APIKeyAuthGroupSnapshot struct { ModelRouting map[string][]int64 `json:"model_routing,omitempty"` ModelRoutingEnabled bool `json:"model_routing_enabled"` MCPXMLInject bool `json:"mcp_xml_inject"` + + // 支持的模型系列(仅 antigravity 平台使用) + SupportedModelScopes []string `json:"supported_model_scopes,omitempty"` } // APIKeyAuthCacheEntry 缓存条目,支持负缓存 diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go index 5fb4edee..d4b2347e 100644 --- a/backend/internal/service/api_key_auth_cache_impl.go +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -241,6 +241,7 @@ func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { ModelRouting: apiKey.Group.ModelRouting, ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, MCPXMLInject: apiKey.Group.MCPXMLInject, + SupportedModelScopes: apiKey.Group.SupportedModelScopes, } } return snapshot @@ -287,6 +288,7 @@ func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapsho ModelRouting: snapshot.Group.ModelRouting, ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, MCPXMLInject: snapshot.Group.MCPXMLInject, + SupportedModelScopes: snapshot.Group.SupportedModelScopes, } } return apiKey diff --git a/backend/internal/service/domain_constants.go b/backend/internal/service/domain_constants.go index eee8bddd..218b7aae 100644 --- a/backend/internal/service/domain_constants.go +++ b/backend/internal/service/domain_constants.go @@ -31,6 +31,7 @@ const ( AccountTypeOAuth = domain.AccountTypeOAuth // OAuth类型账号(full scope: profile + inference) AccountTypeSetupToken = domain.AccountTypeSetupToken // Setup Token类型账号(inference only scope) AccountTypeAPIKey = domain.AccountTypeAPIKey // API Key类型账号 + AccountTypeUpstream = domain.AccountTypeUpstream // 上游透传类型账号(通过 Base URL + API Key 连接上游) ) // Redeem type constants diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 0fb9eced..30078e3c 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -92,6 +92,9 @@ var ( // ErrClaudeCodeOnly 表示分组仅允许 Claude Code 客户端访问 var ErrClaudeCodeOnly = errors.New("this group only allows Claude Code clients") +// ErrModelScopeNotSupported 表示请求的模型系列不在分组支持的范围内 +var ErrModelScopeNotSupported = errors.New("model scope not 
supported by this group") + // allowedHeaders 白名单headers(参考CRS项目) var allowedHeaders = map[string]bool{ "accept": true, @@ -582,6 +585,13 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro log.Printf("[ModelRoutingDebug] load-aware enabled: group_id=%v model=%s session=%s platform=%s", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), platform) } + // Antigravity 模型系列检查(在账号选择前检查,确保所有代码路径都经过此检查) + if platform == PlatformAntigravity && groupID != nil && requestedModel != "" { + if err := s.checkAntigravityModelScope(ctx, *groupID, requestedModel); err != nil { + return nil, err + } + } + accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) if err != nil { return nil, err @@ -1477,6 +1487,13 @@ func shuffleWithinPriority(accounts []*Account) { // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { + // 对 Antigravity 平台,检查请求的模型系列是否在分组支持范围内 + if platform == PlatformAntigravity && groupID != nil && requestedModel != "" { + if err := s.checkAntigravityModelScope(ctx, *groupID, requestedModel); err != nil { + return nil, err + } + } + preferOAuth := platform == PlatformGemini routingAccountIDs := s.routingAccountIDsForRequest(ctx, groupID, requestedModel, platform) @@ -3898,6 +3915,27 @@ func (s *GatewayService) validateUpstreamBaseURL(raw string) (string, error) { return normalized, nil } +// checkAntigravityModelScope 检查 Antigravity 平台的模型系列是否在分组支持范围内 +func (s *GatewayService) checkAntigravityModelScope(ctx context.Context, groupID int64, requestedModel string) error { + scope, ok := ResolveAntigravityQuotaScope(requestedModel) + if !ok { + return nil // 无法解析 scope,跳过检查 + } + + group, err := s.resolveGroupByID(ctx, groupID) + if err != nil { + return nil // 查询失败时放行 + } + if group == nil { + return nil // 分组不存在时放行 + } + + if !IsScopeSupported(group.SupportedModelScopes, scope) { + return ErrModelScopeNotSupported + } + return nil +} + // GetAvailableModels returns the list of models available for a group // It aggregates model_mapping keys from all schedulable accounts in the group func (s *GatewayService) GetAvailableModels(ctx context.Context, groupID *int64, platform string) []string { diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go index 7f1825c6..1302047a 100644 --- a/backend/internal/service/group.go +++ b/backend/internal/service/group.go @@ -41,6 +41,10 @@ type Group struct { // MCP XML 协议注入开关(仅 antigravity 平台使用) MCPXMLInject bool + // 支持的模型系列(仅 antigravity 平台使用) + // 可选值: claude, gemini_text, gemini_image + SupportedModelScopes []string + CreatedAt time.Time UpdatedAt time.Time diff --git a/backend/internal/service/user.go b/backend/internal/service/user.go index c565607e..0f589eb3 100644 --- a/backend/internal/service/user.go +++ b/backend/internal/service/user.go @@ -21,6 +21,11 @@ type User struct { CreatedAt time.Time UpdatedAt time.Time + // TOTP 双因素认证字段 + TotpSecretEncrypted *string // AES-256-GCM 加密的 TOTP 密钥 + TotpEnabled bool // 是否启用 TOTP + TotpEnabledAt *time.Time // TOTP 启用时间 + APIKeys []APIKey Subscriptions []UserSubscription } diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go index 99bf7fd0..1bfb392e 100644 --- a/backend/internal/service/user_service.go +++ b/backend/internal/service/user_service.go @@ 
-39,7 +39,7 @@ type UserRepository interface { ExistsByEmail(ctx context.Context, email string) (bool, error) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) - // TOTP 相关方法 + // TOTP 双因素认证 UpdateTotpSecret(ctx context.Context, userID int64, encryptedSecret *string) error EnableTotp(ctx context.Context, userID int64) error DisableTotp(ctx context.Context, userID int64) error diff --git a/backend/migrations/046_add_group_supported_model_scopes.sql b/backend/migrations/046_add_group_supported_model_scopes.sql new file mode 100644 index 00000000..0b2b3968 --- /dev/null +++ b/backend/migrations/046_add_group_supported_model_scopes.sql @@ -0,0 +1,6 @@ +-- 添加分组支持的模型系列字段 +ALTER TABLE groups +ADD COLUMN IF NOT EXISTS supported_model_scopes JSONB NOT NULL +DEFAULT '["claude", "gemini_text", "gemini_image"]'::jsonb; + +COMMENT ON COLUMN groups.supported_model_scopes IS '支持的模型系列:claude, gemini_text, gemini_image'; diff --git a/docs/rename_local_migrations_20260202.sql b/docs/rename_local_migrations_20260202.sql new file mode 100644 index 00000000..a5ba2ef1 --- /dev/null +++ b/docs/rename_local_migrations_20260202.sql @@ -0,0 +1,27 @@ +-- 修正 schema_migrations 中“本地改名”的迁移文件名 +-- 适用场景:你已执行过旧文件名的迁移,合并后仅改了自己这边的文件名 + +BEGIN; + +UPDATE schema_migrations +SET filename = '042b_add_ops_system_metrics_switch_count.sql' +WHERE filename = '042_add_ops_system_metrics_switch_count.sql' + AND NOT EXISTS ( + SELECT 1 FROM schema_migrations WHERE filename = '042b_add_ops_system_metrics_switch_count.sql' + ); + +UPDATE schema_migrations +SET filename = '043b_add_group_invalid_request_fallback.sql' +WHERE filename = '043_add_group_invalid_request_fallback.sql' + AND NOT EXISTS ( + SELECT 1 FROM schema_migrations WHERE filename = '043b_add_group_invalid_request_fallback.sql' + ); + +UPDATE schema_migrations +SET filename = '044b_add_group_mcp_xml_inject.sql' +WHERE filename = '044_add_group_mcp_xml_inject.sql' + AND NOT EXISTS ( + SELECT 1 FROM schema_migrations WHERE filename = '044b_add_group_mcp_xml_inject.sql' + ); + +COMMIT; diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 144241ff..580a98fa 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -614,21 +614,87 @@ - +
[Vue template markup lost in extraction. Per the script changes below, this hunk reworks the Antigravity section of the create-account form: an account-type selector (Antigravity OAuth vs. Upstream) plus upstream Base URL and API Key inputs. Only the hint interpolations survive: {{ t('admin.accounts.upstream.baseUrlHint') }} and {{ t('admin.accounts.upstream.apiKeyHint') }}.]
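For context on what the upstream account type does with these two credentials: the backend posts a plain Claude-compatible request to base_url + "/v1/messages", sending the key as both a Bearer token and an x-api-key header so either auth style accepted by the upstream works. A self-contained sketch of that construction — the helper name is illustrative, but the trimming and header set mirror ForwardUpstream and testUpstreamConnection above:

package upstream

import (
	"bytes"
	"context"
	"errors"
	"net/http"
	"strings"
)

// buildUpstreamRequest mirrors the credential handling in this patch:
// trim both values, strip the trailing slash from the base URL, and
// send the key in both auth headers.
func buildUpstreamRequest(ctx context.Context, baseURL, apiKey string, body []byte) (*http.Request, error) {
	baseURL = strings.TrimSuffix(strings.TrimSpace(baseURL), "/")
	apiKey = strings.TrimSpace(apiKey)
	if baseURL == "" || apiKey == "" {
		return nil, errors.New("upstream account missing base_url or api_key")
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+"/v1/messages", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey) // Bearer style
	req.Header.Set("x-api-key", apiKey)               // Claude API style
	req.Header.Set("anthropic-version", "2023-06-01")
	return req, nil
}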
@@ -1940,6 +2006,9 @@ const customErrorCodeInput = ref(null) const interceptWarmupRequests = ref(false) const autoPauseOnExpired = ref(true) const mixedScheduling = ref(false) // For antigravity accounts: enable mixed scheduling +const antigravityAccountType = ref<'oauth' | 'upstream'>('oauth') // For antigravity: oauth or upstream +const upstreamBaseUrl = ref('') // For upstream type: base URL +const upstreamApiKey = ref('') // For upstream type: API key const tempUnschedEnabled = ref(false) const tempUnschedRules = ref([]) const geminiOAuthType = ref<'code_assist' | 'google_one' | 'ai_studio'>('google_one') @@ -2037,7 +2106,13 @@ const form = reactive({ }) // Helper to check if current type needs OAuth flow -const isOAuthFlow = computed(() => accountCategory.value === 'oauth-based') +const isOAuthFlow = computed(() => { + // Antigravity upstream 类型不需要 OAuth 流程 + if (form.platform === 'antigravity' && antigravityAccountType.value === 'upstream') { + return false + } + return accountCategory.value === 'oauth-based' +}) const isManualInputMethod = computed(() => { return oauthFlowRef.value?.inputMethod === 'manual' @@ -2077,10 +2152,15 @@ watch( } ) -// Sync form.type based on accountCategory and addMethod +// Sync form.type based on accountCategory, addMethod, and antigravityAccountType watch( - [accountCategory, addMethod], - ([category, method]) => { + [accountCategory, addMethod, antigravityAccountType], + ([category, method, agType]) => { + // Antigravity upstream 类型 + if (form.platform === 'antigravity' && agType === 'upstream') { + form.type = 'upstream' + return + } if (category === 'oauth-based') { form.type = method as AccountType // 'oauth' or 'setup-token' } else { @@ -2108,9 +2188,10 @@ watch( if (newPlatform !== 'anthropic') { interceptWarmupRequests.value = false } - // Antigravity only supports OAuth + // Antigravity: reset to OAuth by default, but allow upstream selection if (newPlatform === 'antigravity') { accountCategory.value = 'oauth-based' + antigravityAccountType.value = 'oauth' } // Reset OAuth states oauth.resetState() @@ -2343,6 +2424,9 @@ const resetForm = () => { sessionIdleTimeout.value = null tlsFingerprintEnabled.value = false sessionIdMaskingEnabled.value = false + antigravityAccountType.value = 'oauth' + upstreamBaseUrl.value = '' + upstreamApiKey.value = '' tempUnschedEnabled.value = false tempUnschedRules.value = [] geminiOAuthType.value = 'code_assist' @@ -2371,6 +2455,36 @@ const handleSubmit = async () => { return } + // For Antigravity upstream type, create directly + if (form.platform === 'antigravity' && antigravityAccountType.value === 'upstream') { + if (!form.name.trim()) { + appStore.showError(t('admin.accounts.pleaseEnterAccountName')) + return + } + if (!upstreamBaseUrl.value.trim()) { + appStore.showError(t('admin.accounts.upstream.pleaseEnterBaseUrl')) + return + } + if (!upstreamApiKey.value.trim()) { + appStore.showError(t('admin.accounts.upstream.pleaseEnterApiKey')) + return + } + + submitting.value = true + try { + const credentials: Record = { + base_url: upstreamBaseUrl.value.trim(), + api_key: upstreamApiKey.value.trim() + } + await createAccountAndFinish(form.platform, 'upstream', credentials) + } catch (error: any) { + appStore.showError(error.response?.data?.detail || t('admin.accounts.failedToCreate')) + } finally { + submitting.value = false + } + return + } + // For apikey type, create directly if (!apiKeyValue.value.trim()) { appStore.showError(t('admin.accounts.pleaseEnterApiKey')) diff --git a/frontend/src/i18n/locales/en.ts 
b/frontend/src/i18n/locales/en.ts index 8afaa0c4..7d6c2f5d 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1034,6 +1034,14 @@ export default { tooltip: 'When enabled, if the request contains MCP tools, an XML format call protocol prompt will be injected into the system prompt. Disable this to avoid interference with certain clients.', enabled: 'Enabled', disabled: 'Disabled' + }, + supportedScopes: { + title: 'Supported Model Families', + tooltip: 'Select the model families this group supports. Unchecked families will not be routed to this group.', + claude: 'Claude', + geminiText: 'Gemini Text', + geminiImage: 'Gemini Image', + hint: 'Select at least one model family' } }, @@ -1173,7 +1181,9 @@ export default { responsesApi: 'Responses API', googleOauth: 'Google OAuth', codeAssist: 'Code Assist', - antigravityOauth: 'Antigravity OAuth' + antigravityOauth: 'Antigravity OAuth', + upstream: 'Upstream', + upstreamDesc: 'Connect via Base URL + API Key' }, status: { active: 'Active', @@ -1431,6 +1441,15 @@ export default { pleaseEnterApiKey: 'Please enter API Key', apiKeyIsRequired: 'API Key is required', leaveEmptyToKeep: 'Leave empty to keep current key', + // Upstream type + upstream: { + baseUrl: 'Upstream Base URL', + baseUrlHint: 'The address of the upstream Antigravity service, e.g., https://upstream.example.com', + apiKey: 'Upstream API Key', + apiKeyHint: 'API Key for the upstream service', + pleaseEnterBaseUrl: 'Please enter upstream Base URL', + pleaseEnterApiKey: 'Please enter upstream API Key' + }, // OAuth flow oauth: { title: 'Claude Account Authorization', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 43fdfd59..feec93a7 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1109,6 +1109,14 @@ export default { tooltip: '启用后,当请求包含 MCP 工具时,会在 system prompt 中注入 XML 格式调用协议提示词。关闭此选项可避免对某些客户端造成干扰。', enabled: '已启用', disabled: '已禁用' + }, + supportedScopes: { + title: '支持的模型系列', + tooltip: '选择此分组支持的模型系列。未勾选的系列将不会被路由到此分组。', + claude: 'Claude', + geminiText: 'Gemini Text', + geminiImage: 'Gemini Image', + hint: '至少选择一个模型系列' } }, @@ -1294,6 +1302,8 @@ export default { googleOauth: 'Google OAuth', codeAssist: 'Code Assist', antigravityOauth: 'Antigravity OAuth', + upstream: '对接上游', + upstreamDesc: '通过 Base URL + API Key 连接上游', api_key: 'API Key', cookie: 'Cookie' }, @@ -1563,6 +1573,15 @@ export default { pleaseEnterApiKey: '请输入 API Key', apiKeyIsRequired: 'API Key 是必需的', leaveEmptyToKeep: '留空以保持当前密钥', + // Upstream type + upstream: { + baseUrl: '上游 Base URL', + baseUrlHint: '上游 Antigravity 服务的地址,例如:https://upstream.example.com', + apiKey: '上游 API Key', + apiKeyHint: '上游服务的 API Key', + pleaseEnterBaseUrl: '请输入上游 Base URL', + pleaseEnterApiKey: '请输入上游 API Key' + }, // OAuth flow oauth: { title: 'Claude 账号授权', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 51e680d7..d65e6b46 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -365,6 +365,11 @@ export interface AdminGroup extends Group { // MCP XML 协议注入(仅 antigravity 平台使用) mcp_xml_inject: boolean + + // 支持的模型系列(仅 antigravity 平台使用) + supported_model_scopes?: string[] + + // 分组下账号数量(仅管理员可见) account_count?: number } @@ -414,6 +419,7 @@ export interface CreateGroupRequest { claude_code_only?: boolean fallback_group_id?: number | null fallback_group_id_on_invalid_request?: number | null + supported_model_scopes?: string[] } export interface UpdateGroupRequest { @@ -433,12 +439,13 @@ 
export interface UpdateGroupRequest { claude_code_only?: boolean fallback_group_id?: number | null fallback_group_id_on_invalid_request?: number | null + supported_model_scopes?: string[] } // ==================== Account & Proxy Types ==================== export type AccountPlatform = 'anthropic' | 'openai' | 'gemini' | 'antigravity' -export type AccountType = 'oauth' | 'setup-token' | 'apikey' +export type AccountType = 'oauth' | 'setup-token' | 'apikey' | 'upstream' export type OAuthAddMethod = 'oauth' | 'setup-token' export type ProxyProtocol = 'http' | 'https' | 'socks5' | 'socks5h' diff --git a/frontend/src/views/admin/GroupsView.vue b/frontend/src/views/admin/GroupsView.vue index 323fef75..80ea1f7e 100644 --- a/frontend/src/views/admin/GroupsView.vue +++ b/frontend/src/views/admin/GroupsView.vue @@ -404,6 +404,62 @@
[Vue template markup lost in extraction. This hunk adds the "Supported Model Families" block to the create-group modal: a section title with tooltip, one toggle per scope (claude / gemini_text / gemini_image) wired to toggleCreateScope, and a hint line. Surviving interpolations: {{ t('admin.groups.supportedScopes.tooltip') }} and {{ t('admin.groups.supportedScopes.hint') }}.]
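These checkboxes feed the backend gate added in antigravity_quota_scope.go and gateway_service.go: an empty scope list means unrestricted, otherwise the request's resolved scope must be present, and unresolvable models or group-lookup failures deliberately fail open. A runnable sketch of the core contract — the scope strings match the patch, the sample group values are illustrative:

package main

import (
	"fmt"
	"slices"
)

// isScopeSupported mirrors IsScopeSupported from the patch: an empty
// configuration is treated as "allow every model family".
func isScopeSupported(supported []string, scope string) bool {
	if len(supported) == 0 {
		return true
	}
	return slices.Contains(supported, scope)
}

func main() {
	group := []string{"claude", "gemini_text"} // gemini_image left unchecked
	fmt.Println(isScopeSupported(group, "claude"))       // true
	fmt.Println(isScopeSupported(group, "gemini_image")) // false: ErrModelScopeNotSupported path
	fmt.Println(isScopeSupported(nil, "gemini_image"))   // true: unrestricted group
}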
@@ -907,6 +963,62 @@
[Vue template markup lost in extraction. This hunk mirrors the block above for the edit-group modal: the same "Supported Model Families" section with tooltip and hint, with the per-scope toggles wired to toggleEditScope. Surviving interpolations: {{ t('admin.groups.supportedScopes.tooltip') }} and {{ t('admin.groups.supportedScopes.hint') }}.]
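Worth noting while reading the form code below: the API types distinguish create from update the same way the backend inputs do. CreateGroupRequest carries supported_model_scopes as a plain []string, while UpdateGroupRequest uses *[]string, so an update can tell "field omitted, keep current scopes" apart from "explicit empty list, lift all restrictions". A runnable sketch of that tri-state — the payloads are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

type updateGroupRequest struct {
	// *[]string yields three states: nil (omitted), pointer to an empty
	// slice (explicitly unrestricted), pointer to a non-empty slice.
	SupportedModelScopes *[]string `json:"supported_model_scopes"`
}

func main() {
	payloads := []string{
		`{}`,                                     // omitted: leave group unchanged
		`{"supported_model_scopes": []}`,         // empty: no scope restriction
		`{"supported_model_scopes": ["claude"]}`, // restrict to Claude only
	}
	for _, p := range payloads {
		var req updateGroupRequest
		if err := json.Unmarshal([]byte(p), &req); err != nil {
			panic(err)
		}
		if req.SupportedModelScopes == nil {
			fmt.Println("omitted -> keep existing scopes")
			continue
		}
		fmt.Printf("set scopes to %v\n", *req.SupportedModelScopes)
	}
}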
@@ -1402,6 +1514,9 @@ const createForm = reactive({ fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 model_routing_enabled: false, + // 支持的模型系列(仅 antigravity 平台) + supported_model_scopes: ['claude', 'gemini_text', 'gemini_image'] as string[], + // MCP XML 协议注入开关(仅 antigravity 平台) mcp_xml_inject: true }) @@ -1472,6 +1587,26 @@ const removeSelectedAccount = (ruleIndex: number, accountId: number, isEdit: boo rule.accounts = rule.accounts.filter(a => a.id !== accountId) } +// 切换创建表单的模型系列选择 +const toggleCreateScope = (scope: string) => { + const idx = createForm.supported_model_scopes.indexOf(scope) + if (idx === -1) { + createForm.supported_model_scopes.push(scope) + } else { + createForm.supported_model_scopes.splice(idx, 1) + } +} + +// 切换编辑表单的模型系列选择 +const toggleEditScope = (scope: string) => { + const idx = editForm.supported_model_scopes.indexOf(scope) + if (idx === -1) { + editForm.supported_model_scopes.push(scope) + } else { + editForm.supported_model_scopes.splice(idx, 1) + } +} + // 处理账号搜索输入框聚焦 const onAccountSearchFocus = (ruleIndex: number, isEdit: boolean = false) => { const key = `${isEdit ? 'edit' : 'create'}-${ruleIndex}` @@ -1575,6 +1710,9 @@ const editForm = reactive({ fallback_group_id_on_invalid_request: null as number | null, // 模型路由开关 model_routing_enabled: false, + // 支持的模型系列(仅 antigravity 平台) + supported_model_scopes: ['claude', 'gemini_text', 'gemini_image'] as string[], + // MCP XML 协议注入开关(仅 antigravity 平台) mcp_xml_inject: true }) @@ -1658,6 +1796,7 @@ const closeCreateModal = () => { createForm.claude_code_only = false createForm.fallback_group_id = null createForm.fallback_group_id_on_invalid_request = null + createForm.supported_model_scopes = ['claude', 'gemini_text', 'gemini_image'] createForm.mcp_xml_inject = true createModelRoutingRules.value = [] } @@ -1710,6 +1849,7 @@ const handleEdit = async (group: AdminGroup) => { editForm.fallback_group_id = group.fallback_group_id editForm.fallback_group_id_on_invalid_request = group.fallback_group_id_on_invalid_request editForm.model_routing_enabled = group.model_routing_enabled || false + editForm.supported_model_scopes = group.supported_model_scopes || ['claude', 'gemini_text', 'gemini_image'] editForm.mcp_xml_inject = group.mcp_xml_inject ?? 
true // 加载模型路由规则(异步加载账号名称) editModelRoutingRules.value = await convertApiFormatToRoutingRules(group.model_routing) From 606e29d390371e322c29d720f27564ef33d5ddba Mon Sep 17 00:00:00 2001 From: bayma888 Date: Tue, 3 Feb 2026 00:16:10 +0800 Subject: [PATCH 084/214] feat(admin): add user balance/concurrency history modal - Add new API endpoint GET /admin/users/:id/balance-history with pagination and type filter - Add SumPositiveBalanceByUser for calculating total recharged amount - Create UserBalanceHistoryModal component with: - User info header (email, username, created_at, current balance, notes, total recharged) - Type filter dropdown (all/balance/admin_balance/concurrency/admin_concurrency/subscription) - Quick deposit/withdraw buttons - Paginated history list with icons and colored values - Add instant tooltip on balance column for better UX - Add z-index prop to BaseDialog for modal stacking control - Update i18n translations (zh/en) --- .../internal/handler/admin/user_handler.go | 41 +++ .../internal/repository/redeem_code_repo.go | 51 +++ backend/internal/server/routes/admin.go | 1 + backend/internal/service/admin_service.go | 19 ++ backend/internal/service/redeem_service.go | 5 + frontend/src/api/admin/index.ts | 3 + frontend/src/api/admin/users.ts | 50 ++- .../admin/user/UserBalanceHistoryModal.vue | 320 ++++++++++++++++++ frontend/src/components/common/BaseDialog.vue | 10 +- frontend/src/i18n/locales/en.ts | 14 + frontend/src/i18n/locales/zh.ts | 14 + frontend/src/views/admin/UsersView.vue | 64 +++- 12 files changed, 588 insertions(+), 4 deletions(-) create mode 100644 frontend/src/components/admin/user/UserBalanceHistoryModal.vue diff --git a/backend/internal/handler/admin/user_handler.go b/backend/internal/handler/admin/user_handler.go index 9a5a691f..ac76689d 100644 --- a/backend/internal/handler/admin/user_handler.go +++ b/backend/internal/handler/admin/user_handler.go @@ -277,3 +277,44 @@ func (h *UserHandler) GetUserUsage(c *gin.Context) { response.Success(c, stats) } + +// GetBalanceHistory handles getting user's balance/concurrency change history +// GET /api/v1/admin/users/:id/balance-history +// Query params: +// - type: filter by record type (balance, admin_balance, concurrency, admin_concurrency, subscription) +func (h *UserHandler) GetBalanceHistory(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + page, pageSize := response.ParsePagination(c) + codeType := c.Query("type") + + codes, total, totalRecharged, err := h.adminService.GetUserBalanceHistory(c.Request.Context(), userID, page, pageSize, codeType) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Convert to admin DTO (includes notes field for admin visibility) + out := make([]dto.AdminRedeemCode, 0, len(codes)) + for i := range codes { + out = append(out, *dto.RedeemCodeFromServiceAdmin(&codes[i])) + } + + // Custom response with total_recharged alongside pagination + pages := int((total + int64(pageSize) - 1) / int64(pageSize)) + if pages < 1 { + pages = 1 + } + response.Success(c, gin.H{ + "items": out, + "total": total, + "page": page, + "page_size": pageSize, + "pages": pages, + "total_recharged": totalRecharged, + }) +} diff --git a/backend/internal/repository/redeem_code_repo.go b/backend/internal/repository/redeem_code_repo.go index ee8a01b5..a3a048c3 100644 --- a/backend/internal/repository/redeem_code_repo.go +++ b/backend/internal/repository/redeem_code_repo.go @@ -202,6 +202,57 @@ 
func (r *redeemCodeRepository) ListByUser(ctx context.Context, userID int64, lim return redeemCodeEntitiesToService(codes), nil } +// ListByUserPaginated returns paginated balance/concurrency history for a user. +// Supports optional type filter (e.g. "balance", "admin_balance", "concurrency", "admin_concurrency", "subscription"). +func (r *redeemCodeRepository) ListByUserPaginated(ctx context.Context, userID int64, params pagination.PaginationParams, codeType string) ([]service.RedeemCode, *pagination.PaginationResult, error) { + q := r.client.RedeemCode.Query(). + Where(redeemcode.UsedByEQ(userID)) + + // Optional type filter + if codeType != "" { + q = q.Where(redeemcode.TypeEQ(codeType)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + codes, err := q. + WithGroup(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(redeemcode.FieldUsedAt)). + All(ctx) + if err != nil { + return nil, nil, err + } + + return redeemCodeEntitiesToService(codes), paginationResultFromTotal(int64(total), params), nil +} + +// SumPositiveBalanceByUser returns total recharged amount (sum of value > 0 where type is balance/admin_balance). +func (r *redeemCodeRepository) SumPositiveBalanceByUser(ctx context.Context, userID int64) (float64, error) { + var result []struct { + Sum float64 `json:"sum"` + } + err := r.client.RedeemCode.Query(). + Where( + redeemcode.UsedByEQ(userID), + redeemcode.ValueGT(0), + redeemcode.TypeIn("balance", "admin_balance"), + ). + Aggregate(dbent.As(dbent.Sum(redeemcode.FieldValue), "sum")). + Scan(ctx, &result) + if err != nil { + return 0, err + } + if len(result) == 0 { + return 0, nil + } + return result[0].Sum, nil +} + func redeemCodeEntityToService(m *dbent.RedeemCode) *service.RedeemCode { if m == nil { return nil diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 050e724d..3ade973a 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -172,6 +172,7 @@ func registerUserManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) { users.POST("/:id/balance", h.Admin.User.UpdateBalance) users.GET("/:id/api-keys", h.Admin.User.GetUserAPIKeys) users.GET("/:id/usage", h.Admin.User.GetUserUsage) + users.GET("/:id/balance-history", h.Admin.User.GetBalanceHistory) // User attribute values users.GET("/:id/attributes", h.Admin.UserAttribute.GetUserAttributes) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 0afa0716..63958e5b 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -22,6 +22,10 @@ type AdminService interface { UpdateUserBalance(ctx context.Context, userID int64, balance float64, operation string, notes string) (*User, error) GetUserAPIKeys(ctx context.Context, userID int64, page, pageSize int) ([]APIKey, int64, error) GetUserUsageStats(ctx context.Context, userID int64, period string) (any, error) + // GetUserBalanceHistory returns paginated balance/concurrency change records for a user. + // codeType is optional - pass empty string to return all types. + // Also returns totalRecharged (sum of all positive balance top-ups). 
+ GetUserBalanceHistory(ctx context.Context, userID int64, page, pageSize int, codeType string) ([]RedeemCode, int64, float64, error) // Group management ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, isExclusive *bool) ([]Group, int64, error) @@ -522,6 +526,21 @@ func (s *adminServiceImpl) GetUserUsageStats(ctx context.Context, userID int64, }, nil } +// GetUserBalanceHistory returns paginated balance/concurrency change records for a user. +func (s *adminServiceImpl) GetUserBalanceHistory(ctx context.Context, userID int64, page, pageSize int, codeType string) ([]RedeemCode, int64, float64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + codes, result, err := s.redeemCodeRepo.ListByUserPaginated(ctx, userID, params, codeType) + if err != nil { + return nil, 0, 0, err + } + // Aggregate total recharged amount (only once, regardless of type filter) + totalRecharged, err := s.redeemCodeRepo.SumPositiveBalanceByUser(ctx, userID) + if err != nil { + return nil, 0, 0, err + } + return codes, result.Total, totalRecharged, nil +} + // Group management implementations func (s *adminServiceImpl) ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, isExclusive *bool) ([]Group, int64, error) { params := pagination.PaginationParams{Page: page, PageSize: pageSize} diff --git a/backend/internal/service/redeem_service.go b/backend/internal/service/redeem_service.go index ff52dc47..dd99d4c6 100644 --- a/backend/internal/service/redeem_service.go +++ b/backend/internal/service/redeem_service.go @@ -49,6 +49,11 @@ type RedeemCodeRepository interface { List(ctx context.Context, params pagination.PaginationParams) ([]RedeemCode, *pagination.PaginationResult, error) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]RedeemCode, *pagination.PaginationResult, error) ListByUser(ctx context.Context, userID int64, limit int) ([]RedeemCode, error) + // ListByUserPaginated returns paginated balance/concurrency history for a specific user. + // codeType filter is optional - pass empty string to return all types. + ListByUserPaginated(ctx context.Context, userID int64, params pagination.PaginationParams, codeType string) ([]RedeemCode, *pagination.PaginationResult, error) + // SumPositiveBalanceByUser returns the total recharged amount (sum of positive balance values) for a user. 
+	SumPositiveBalanceByUser(ctx context.Context, userID int64) (float64, error) }

// GenerateCodesRequest 生成兑换码请求 diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index e86f6348..b61858f7 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -59,3 +59,6 @@ export { } export default adminAPI + +// Re-export types used by components +export type { BalanceHistoryItem } from './users' diff --git a/frontend/src/api/admin/users.ts b/frontend/src/api/admin/users.ts index 734e3ac7..287aef96 100644 --- a/frontend/src/api/admin/users.ts +++ b/frontend/src/api/admin/users.ts @@ -174,6 +174,53 @@ export async function getUserUsageStats( return data } +/** + * Balance history item returned from the API + */ +export interface BalanceHistoryItem { + id: number + code: string + type: string + value: number + status: string + used_by: number | null + used_at: string | null + created_at: string + group_id: number | null + validity_days: number + notes: string + user?: { id: number; email: string } | null + group?: { id: number; name: string } | null +} + +// Balance history response extends pagination with total_recharged summary +export interface BalanceHistoryResponse extends PaginatedResponse<BalanceHistoryItem> { + total_recharged: number +} + +/** + * Get user's balance/concurrency change history + * @param id - User ID + * @param page - Page number + * @param pageSize - Items per page + * @param type - Optional type filter (balance, admin_balance, concurrency, admin_concurrency, subscription) + * @returns Paginated balance history with total_recharged + */ +export async function getUserBalanceHistory( + id: number, + page: number = 1, + pageSize: number = 20, + type?: string +): Promise<BalanceHistoryResponse> { + const params: Record<string, string | number> = { page, page_size: pageSize } + if (type) params.type = type + const { data } = await apiClient.get<BalanceHistoryResponse>( + `/admin/users/${id}/balance-history`, + { params } + ) + return data +} + export const usersAPI = { list, getById, @@ -184,7 +231,8 @@ export const usersAPI = { updateConcurrency, toggleStatus, getUserApiKeys, - getUserUsageStats + getUserUsageStats, + getUserBalanceHistory } export default usersAPI diff --git a/frontend/src/components/admin/user/UserBalanceHistoryModal.vue b/frontend/src/components/admin/user/UserBalanceHistoryModal.vue new file mode 100644 index 00000000..e7dfdb7d --- /dev/null +++ b/frontend/src/components/admin/user/UserBalanceHistoryModal.vue @@ -0,0 +1,320 @@ + + + diff --git a/frontend/src/components/common/BaseDialog.vue b/frontend/src/components/common/BaseDialog.vue index 3d38b568..93e4ba36 100644 --- a/frontend/src/components/common/BaseDialog.vue +++ b/frontend/src/components/common/BaseDialog.vue @@ -4,6 +4,7 @@
diff --git a/frontend/src/components/admin/account/ImportDataModal.vue b/frontend/src/components/admin/account/ImportDataModal.vue new file mode 100644 index 00000000..5b42fe17 --- /dev/null +++ b/frontend/src/components/admin/account/ImportDataModal.vue @@ -0,0 +1,168 @@ + + + diff --git a/frontend/src/components/common/ConfirmDialog.vue b/frontend/src/components/common/ConfirmDialog.vue index abccc416..6ffd9b77 100644 --- a/frontend/src/components/common/ConfirmDialog.vue +++ b/frontend/src/components/common/ConfirmDialog.vue @@ -2,6 +2,7 @@
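The getUserBalanceHistory client above pairs with the handler's pagination, where pages = ceil(total / page_size) clamped to at least 1. A minimal consumer sketch follows; it assumes PaginatedResponse exposes the items/total/pages fields the handler emits, and fetchAllBalanceHistory is an illustrative name, not part of this patch:

```ts
import { getUserBalanceHistory, type BalanceHistoryItem } from '@/api/admin/users'

// Illustrative only: walks every page of a user's balance history.
export async function fetchAllBalanceHistory(
  userId: number,
  type?: string
): Promise<BalanceHistoryItem[]> {
  const pageSize = 20
  const all: BalanceHistoryItem[] = []
  let page = 1
  let pages = 1
  while (page <= pages) {
    const resp = await getUserBalanceHistory(userId, page, pageSize, type)
    all.push(...resp.items)
    // The handler returns pages = ceil(total / page_size), clamped to >= 1,
    // so this loop terminates even when the user has no records.
    pages = resp.pages
    page += 1
  }
  return all
}
```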

{{ message }}

+
+
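The ConfirmDialog hunk above lost its template markup in extraction. Judging from the export dialog, which renders an "include proxies" checkbox inside a ConfirmDialog, the added line is plausibly a default slot below the message. A hedged render-function sketch of that shape; the component name and structure here are assumptions, not the stripped source:

```ts
import { defineComponent, h } from 'vue'

// Hypothetical reconstruction: ConfirmDialog gains a default slot under the
// message so callers (e.g. the export dialog's include-proxies checkbox)
// can inject extra content. The real template line was stripped above.
export default defineComponent({
  name: 'ConfirmDialogBodySketch',
  props: {
    message: { type: String, required: true }
  },
  setup(props, { slots }) {
    return () =>
      h('div', [
        h('p', props.message),
        // Stand-in for the slot the diff presumably adds.
        slots.default ? slots.default() : null
      ])
  }
})
```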
@@ -218,9 +226,16 @@ + + + + @@ -242,6 +257,7 @@ import AccountTableActions from '@/components/admin/account/AccountTableActions. import AccountTableFilters from '@/components/admin/account/AccountTableFilters.vue' import AccountBulkActionsBar from '@/components/admin/account/AccountBulkActionsBar.vue' import AccountActionMenu from '@/components/admin/account/AccountActionMenu.vue' +import ImportDataModal from '@/components/admin/account/ImportDataModal.vue' import ReAuthAccountModal from '@/components/admin/account/ReAuthAccountModal.vue' import AccountTestModal from '@/components/admin/account/AccountTestModal.vue' import AccountStatsModal from '@/components/admin/account/AccountStatsModal.vue' @@ -265,6 +281,9 @@ const selIds = ref([]) const showCreate = ref(false) const showEdit = ref(false) const showSync = ref(false) +const showImportData = ref(false) +const showExportDataDialog = ref(false) +const includeProxyOnExport = ref(true) const showBulkEdit = ref(false) const showTempUnsched = ref(false) const showDeleteDialog = ref(false) @@ -279,6 +298,7 @@ const testingAcc = ref(null) const statsAcc = ref(null) const togglingSchedulable = ref(null) const menu = reactive<{show:boolean, acc:Account|null, pos:{top:number, left:number}|null}>({ show: false, acc: null, pos: null }) +const exportingData = ref(false) // Column settings const showColumnDropdown = ref(false) @@ -405,6 +425,8 @@ const isAnyModalOpen = computed(() => { showCreate.value || showEdit.value || showSync.value || + showImportData.value || + showExportDataDialog.value || showBulkEdit.value || showTempUnsched.value || showDeleteDialog.value || @@ -633,6 +655,50 @@ const handleBulkToggleSchedulable = async (schedulable: boolean) => { } } const handleBulkUpdated = () => { showBulkEdit.value = false; selIds.value = []; reload() } +const handleDataImported = () => { showImportData.value = false; reload() } +const formatExportTimestamp = () => { + const now = new Date() + const pad2 = (value: number) => String(value).padStart(2, '0') + return `${now.getFullYear()}${pad2(now.getMonth() + 1)}${pad2(now.getDate())}${pad2(now.getHours())}${pad2(now.getMinutes())}${pad2(now.getSeconds())}` +} +const openExportDataDialog = () => { + includeProxyOnExport.value = true + showExportDataDialog.value = true +} +const handleExportData = async () => { + if (exportingData.value) return + exportingData.value = true + try { + const dataPayload = await adminAPI.accounts.exportData( + selIds.value.length > 0 + ? 
{ ids: selIds.value, includeProxies: includeProxyOnExport.value } + : { + includeProxies: includeProxyOnExport.value, + filters: { + platform: params.platform, + type: params.type, + status: params.status, + search: params.search + } + } + ) + const timestamp = formatExportTimestamp() + const filename = `sub2api-account-${timestamp}.json` + const blob = new Blob([JSON.stringify(dataPayload, null, 2)], { type: 'application/json' }) + const url = URL.createObjectURL(blob) + const link = document.createElement('a') + link.href = url + link.download = filename + link.click() + URL.revokeObjectURL(url) + appStore.showSuccess(t('admin.accounts.dataExported')) + } catch (error: any) { + appStore.showError(error?.message || t('admin.accounts.dataExportFailed')) + } finally { + exportingData.value = false + showExportDataDialog.value = false + } +} const closeTestModal = () => { showTest.value = false; testingAcc.value = null } const closeStatsModal = () => { showStats.value = false; statsAcc.value = null } const closeReAuthModal = () => { showReAuth.value = false; reAuthAcc.value = null } diff --git a/frontend/src/views/admin/ProxiesView.vue b/frontend/src/views/admin/ProxiesView.vue index 3bd766b6..f6cec7ac 100644 --- a/frontend/src/views/admin/ProxiesView.vue +++ b/frontend/src/views/admin/ProxiesView.vue @@ -69,6 +69,9 @@ {{ t('admin.proxies.batchDeleteAction') }} + +
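handleExportData above serializes the payload, builds a yyyyMMddHHmmss filename, and downloads it through a temporary object URL. A standalone sketch of that flow; downloadJson is an illustrative helper name, not part of the codebase:

```ts
// Mirrors the export flow in AccountsView.vue: timestamped filename,
// Blob + object URL, synthetic link click, then URL cleanup.
function formatExportTimestamp(now: Date = new Date()): string {
  const pad2 = (v: number) => String(v).padStart(2, '0')
  return (
    `${now.getFullYear()}${pad2(now.getMonth() + 1)}${pad2(now.getDate())}` +
    `${pad2(now.getHours())}${pad2(now.getMinutes())}${pad2(now.getSeconds())}`
  )
}

function downloadJson(payload: unknown, prefix: string): void {
  const filename = `${prefix}-${formatExportTimestamp()}.json`
  const blob = new Blob([JSON.stringify(payload, null, 2)], {
    type: 'application/json'
  })
  const url = URL.createObjectURL(blob)
  const link = document.createElement('a')
  link.href = url
  link.download = filename
  link.click()
  // Revoke after the click so the browser can start the download first.
  URL.revokeObjectURL(url)
}
```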
-

- {{ fileName }} -

(null) const result = ref(null) +const fileInput = ref(null) const fileName = computed(() => file.value?.name || '') const errorItems = computed(() => result.value?.errors || []) @@ -110,10 +122,17 @@ watch( if (open) { file.value = null result.value = null + if (fileInput.value) { + fileInput.value.value = '' + } } } ) +const openFilePicker = () => { + fileInput.value?.click() +} + const handleFileChange = (event: Event) => { const target = event.target as HTMLInputElement file.value = target.files?.[0] || null diff --git a/frontend/src/components/admin/proxy/ImportDataModal.vue b/frontend/src/components/admin/proxy/ImportDataModal.vue new file mode 100644 index 00000000..6999ecc1 --- /dev/null +++ b/frontend/src/components/admin/proxy/ImportDataModal.vue @@ -0,0 +1,183 @@ + + + diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 8a7fb48f..3c407080 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -165,6 +165,7 @@ export default { selectedCount: '({count} selected)', refresh: 'Refresh', settings: 'Settings', + chooseFile: 'Choose File', notAvailable: 'N/A', now: 'Now', unknown: 'Unknown', @@ -1190,7 +1191,7 @@ export default { syncFromCrs: 'Sync from CRS', dataExport: 'Export', dataExportSelected: 'Export Selected', - dataExportIncludeProxies: 'Include proxies (unchecked = no proxy linkage on import)', + dataExportIncludeProxies: 'Include proxies linked to the exported accounts', dataImport: 'Import', dataExportConfirmMessage: 'The exported data contains sensitive account and proxy information. Store it securely.', dataExportConfirm: 'Confirm Export', @@ -1198,7 +1199,7 @@ export default { dataExportFailed: 'Failed to export data', dataImportTitle: 'Import Data', dataImportHint: 'Upload the exported JSON file to import accounts and proxies.', - dataImportWarning: 'Import will create new accounts/proxies; groups must be bound manually. Ensure no conflicts in the target instance.', + dataImportWarning: 'Import will create new accounts/proxies; groups must be bound manually. Ensure existing data does not conflict.', dataImportFile: 'Data file', dataImportButton: 'Start Import', dataImporting: 'Importing...', @@ -1901,6 +1902,21 @@ export default { createProxy: 'Create Proxy', editProxy: 'Edit Proxy', deleteProxy: 'Delete Proxy', + dataImport: 'Import', + dataImportTitle: 'Import Proxies', + dataImportHint: 'Upload the exported proxy JSON file to import proxies in bulk.', + dataImportWarning: 'Import will create or reuse proxies, keep their status, and trigger latency checks after completion.', + dataImportFile: 'Data File', + dataImportButton: 'Start Import', + dataImporting: 'Importing...', + dataImportSelectFile: 'Please select a data file', + dataImportParseFailed: 'Failed to parse data', + dataImportFailed: 'Failed to import data', + dataImportResult: 'Import Result', + dataImportResultSummary: 'Created {proxy_created}, reused {proxy_reused}, failed {proxy_failed}', + dataImportErrors: 'Failure Details', + dataImportSuccess: 'Import completed: created {proxy_created}, reused {proxy_reused}', + dataImportCompletedWithErrors: 'Import completed with errors: failed {proxy_failed}', dataExport: 'Export', dataExportConfirmMessage: 'The exported data contains sensitive proxy information. 
Store it securely.', dataExportConfirm: 'Confirm Export', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 006a7bd2..9ff89dfc 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -162,6 +162,7 @@ export default { selectedCount: '(已选 {count} 个)', refresh: '刷新', settings: '设置', + chooseFile: '选择文件', notAvailable: '不可用', now: '现在', unknown: '未知', @@ -1275,7 +1276,7 @@ export default { syncFromCrs: '从 CRS 同步', dataExport: '导出', dataExportSelected: '导出选中', - dataExportIncludeProxies: '导出代理(取消后导入时不关联代理)', + dataExportIncludeProxies: '导出代理(导出账号关联的代理)', dataImport: '导入', dataExportConfirmMessage: '导出的数据包含账号与代理的敏感信息,请妥善保存。', dataExportConfirm: '确认导出', @@ -1283,7 +1284,7 @@ export default { dataExportFailed: '数据导出失败', dataImportTitle: '导入数据', dataImportHint: '上传导出的 JSON 文件以批量导入账号与代理。', - dataImportWarning: '导入将创建新账号与代理,分组需手工绑定;请确认目标实例已有数据不会冲突。', + dataImportWarning: '导入将创建新账号与代理,分组需手工绑定;请确认已有数据不会冲突。', dataImportFile: '数据文件', dataImportButton: '开始导入', dataImporting: '导入中...', @@ -2010,6 +2011,21 @@ export default { deleteProxy: '删除代理', deleteConfirmMessage: "确定要删除代理 '{name}' 吗?", testProxy: '测试代理', + dataImport: '导入', + dataImportTitle: '导入代理', + dataImportHint: '上传代理导出的 JSON 文件以批量导入代理。', + dataImportWarning: '导入将创建或复用代理,保留状态并在完成后自动触发延迟检测。', + dataImportFile: '数据文件', + dataImportButton: '开始导入', + dataImporting: '导入中...', + dataImportSelectFile: '请选择数据文件', + dataImportParseFailed: '数据解析失败', + dataImportFailed: '数据导入失败', + dataImportResult: '导入结果', + dataImportResultSummary: '创建 {proxy_created},复用 {proxy_reused},失败 {proxy_failed}', + dataImportErrors: '失败详情', + dataImportSuccess: '导入完成:创建 {proxy_created},复用 {proxy_reused}', + dataImportCompletedWithErrors: '导入完成但有错误:失败 {proxy_failed}', dataExport: '导出', dataExportConfirmMessage: '导出的数据包含代理的敏感信息,请妥善保存。', dataExportConfirm: '确认导出', diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue index d8ecd372..e6fe25c8 100644 --- a/frontend/src/views/admin/AccountsView.vue +++ b/frontend/src/views/admin/AccountsView.vue @@ -118,6 +118,15 @@ default-sort-order="asc" :sort-storage-key="ACCOUNT_SORT_STORAGE_KEY" > + @@ -551,6 +560,21 @@ const openMenu = (a: Account, e: MouseEvent) => { menu.show = true } const toggleSel = (id: number) => { const i = selIds.value.indexOf(id); if(i === -1) selIds.value.push(id); else selIds.value.splice(i, 1) } +const allVisibleSelected = computed(() => { + if (accounts.value.length === 0) return false + return accounts.value.every(account => selIds.value.includes(account.id)) +}) +const toggleSelectAllVisible = (event: Event) => { + const target = event.target as HTMLInputElement + if (target.checked) { + const next = new Set(selIds.value) + accounts.value.forEach(account => next.add(account.id)) + selIds.value = Array.from(next) + return + } + const visibleIds = new Set(accounts.value.map(account => account.id)) + selIds.value = selIds.value.filter(id => !visibleIds.has(id)) +} const selectPage = () => { selIds.value = [...new Set([...selIds.value, ...accounts.value.map(a => a.id)])] } const handleBulkDelete = async () => { if(!confirm(t('common.confirm'))) return; try { await Promise.all(selIds.value.map(id => adminAPI.accounts.delete(id))); selIds.value = []; reload() } catch (error) { console.error('Failed to bulk delete accounts:', error) } } const updateSchedulableInList = (accountIds: number[], schedulable: boolean) => { diff --git a/frontend/src/views/admin/ProxiesView.vue b/frontend/src/views/admin/ProxiesView.vue 
index f6cec7ac..b644eb33 100644 --- a/frontend/src/views/admin/ProxiesView.vue +++ b/frontend/src/views/admin/ProxiesView.vue @@ -69,6 +69,9 @@ {{ t('admin.proxies.batchDeleteAction') }} + @@ -619,6 +622,12 @@ @cancel="showExportDataDialog = false" /> + + { batchParseResult.proxies = [] } +const handleDataImported = () => { + showImportData.value = false + loadProxies() +} + // Parse proxy URL: protocol://user:pass@host:port or protocol://host:port const parseProxyUrl = ( line: string From 0c660f8335f58cf7222883be5dbda6c30aa860d6 Mon Sep 17 00:00:00 2001 From: LLLLLLiulei <1065070665@qq.com> Date: Thu, 5 Feb 2026 18:35:00 +0800 Subject: [PATCH 130/214] feat: refine proxy export and toolbar layout --- .../handler/admin/admin_service_stub_test.go | 6 ++ backend/internal/handler/admin/proxy_data.go | 79 +++++++++++++-- .../handler/admin/proxy_data_handler_test.go | 39 ++++++++ frontend/src/api/admin/proxies.ts | 24 +++-- .../admin/account/AccountTableActions.vue | 1 + frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + frontend/src/views/admin/AccountsView.vue | 2 +- frontend/src/views/admin/ProxiesView.vue | 97 ++++++++++--------- 9 files changed, 186 insertions(+), 64 deletions(-) diff --git a/backend/internal/handler/admin/admin_service_stub_test.go b/backend/internal/handler/admin/admin_service_stub_test.go index c1256081..a5376e4a 100644 --- a/backend/internal/handler/admin/admin_service_stub_test.go +++ b/backend/internal/handler/admin/admin_service_stub_test.go @@ -259,6 +259,12 @@ func (s *stubAdminService) GetAllProxiesWithAccountCount(ctx context.Context) ([ } func (s *stubAdminService) GetProxy(ctx context.Context, id int64) (*service.Proxy, error) { + for i := range s.proxies { + proxy := s.proxies[i] + if proxy.ID == id { + return &proxy, nil + } + } proxy := service.Proxy{ID: id, Name: "proxy", Status: service.StatusActive} return &proxy, nil } diff --git a/backend/internal/handler/admin/proxy_data.go b/backend/internal/handler/admin/proxy_data.go index 0bcab027..bc2a76ab 100644 --- a/backend/internal/handler/admin/proxy_data.go +++ b/backend/internal/handler/admin/proxy_data.go @@ -2,6 +2,8 @@ package admin import ( "context" + "fmt" + "strconv" "strings" "time" @@ -14,17 +16,32 @@ import ( func (h *ProxyHandler) ExportData(c *gin.Context) { ctx := c.Request.Context() - protocol := c.Query("protocol") - status := c.Query("status") - search := strings.TrimSpace(c.Query("search")) - if len(search) > 100 { - search = search[:100] + selectedIDs, err := parseProxyIDs(c) + if err != nil { + response.BadRequest(c, err.Error()) + return } - proxies, err := h.listProxiesFiltered(ctx, protocol, status, search) - if err != nil { - response.ErrorFrom(c, err) - return + var proxies []service.Proxy + if len(selectedIDs) > 0 { + proxies, err = h.getProxiesByIDs(ctx, selectedIDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + } else { + protocol := c.Query("protocol") + status := c.Query("status") + search := strings.TrimSpace(c.Query("search")) + if len(search) > 100 { + search = search[:100] + } + + proxies, err = h.listProxiesFiltered(ctx, protocol, status, search) + if err != nil { + response.ErrorFrom(c, err) + return + } } dataProxies := make([]DataProxy, 0, len(proxies)) @@ -168,6 +185,50 @@ func (h *ProxyHandler) ImportData(c *gin.Context) { response.Success(c, result) } +func (h *ProxyHandler) getProxiesByIDs(ctx context.Context, ids []int64) ([]service.Proxy, error) { + out := make([]service.Proxy, 0, len(ids)) + for _, id := range ids { + 
proxy, err := h.adminService.GetProxy(ctx, id) + if err != nil { + return nil, err + } + if proxy == nil { + continue + } + out = append(out, *proxy) + } + return out, nil +} + +func parseProxyIDs(c *gin.Context) ([]int64, error) { + values := c.QueryArray("ids") + if len(values) == 0 { + raw := strings.TrimSpace(c.Query("ids")) + if raw != "" { + values = []string{raw} + } + } + if len(values) == 0 { + return nil, nil + } + + ids := make([]int64, 0, len(values)) + for _, item := range values { + for _, part := range strings.Split(item, ",") { + part = strings.TrimSpace(part) + if part == "" { + continue + } + id, err := strconv.ParseInt(part, 10, 64) + if err != nil || id <= 0 { + return nil, fmt.Errorf("invalid proxy id: %s", part) + } + ids = append(ids, id) + } + } + return ids, nil +} + func (h *ProxyHandler) listProxiesFiltered(ctx context.Context, protocol, status, search string) ([]service.Proxy, error) { page := 1 pageSize := dataPageCap diff --git a/backend/internal/handler/admin/proxy_data_handler_test.go b/backend/internal/handler/admin/proxy_data_handler_test.go index f7609097..545b24a3 100644 --- a/backend/internal/handler/admin/proxy_data_handler_test.go +++ b/backend/internal/handler/admin/proxy_data_handler_test.go @@ -75,6 +75,45 @@ func TestProxyExportDataRespectsFilters(t *testing.T) { require.Equal(t, "https", resp.Data.Proxies[0].Protocol) } +func TestProxyExportDataWithSelectedIDs(t *testing.T) { + router, adminSvc := setupProxyDataRouter() + + adminSvc.proxies = []service.Proxy{ + { + ID: 1, + Name: "proxy-a", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Username: "user", + Password: "pass", + Status: service.StatusActive, + }, + { + ID: 2, + Name: "proxy-b", + Protocol: "https", + Host: "10.0.0.2", + Port: 443, + Username: "u", + Password: "p", + Status: service.StatusDisabled, + }, + } + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/proxies/data?ids=2", nil) + router.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + var resp proxyDataResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, 0, resp.Code) + require.Len(t, resp.Data.Proxies, 1) + require.Equal(t, "https", resp.Data.Proxies[0].Protocol) + require.Equal(t, "10.0.0.2", resp.Data.Proxies[0].Host) +} + func TestProxyImportDataReusesAndTriggersLatencyProbe(t *testing.T) { router, adminSvc := setupProxyDataRouter() diff --git a/frontend/src/api/admin/proxies.ts b/frontend/src/api/admin/proxies.ts index 76c96c7d..b6aaf595 100644 --- a/frontend/src/api/admin/proxies.ts +++ b/frontend/src/api/admin/proxies.ts @@ -210,14 +210,24 @@ export async function batchDelete(ids: number[]): Promise<{ return data } -export async function exportData(filters?: { - protocol?: string - status?: 'active' | 'inactive' - search?: string +export async function exportData(options?: { + ids?: number[] + filters?: { + protocol?: string + status?: 'active' | 'inactive' + search?: string + } }): Promise { - const { data } = await apiClient.get('/admin/proxies/data', { - params: filters - }) + const params: Record = {} + if (options?.ids && options.ids.length > 0) { + params.ids = options.ids.join(',') + } else if (options?.filters) { + const { protocol, status, search } = options.filters + if (protocol) params.protocol = protocol + if (status) params.status = status + if (search) params.search = search + } + const { data } = await apiClient.get('/admin/proxies/data', { params }) return data } diff --git 
a/frontend/src/components/admin/account/AccountTableActions.vue b/frontend/src/components/admin/account/AccountTableActions.vue index a449f866..ee521f83 100644 --- a/frontend/src/components/admin/account/AccountTableActions.vue +++ b/frontend/src/components/admin/account/AccountTableActions.vue @@ -6,6 +6,7 @@ +
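The backend's parseProxyIDs in proxy_data.go above accepts both repeated ids query params and comma-separated lists, rejecting empty, non-numeric, or non-positive values. A TypeScript mirror of the same parsing rules; the function name is illustrative and the Go version remains the source of truth:

```ts
// Client-side mirror of parseProxyIDs: splits each value on commas,
// trims whitespace, skips empties, and rejects malformed ids.
function parseProxyIds(values: string[]): number[] {
  const ids: number[] = []
  for (const item of values) {
    for (const part of item.split(',')) {
      const trimmed = part.trim()
      if (trimmed === '') continue
      // Match the Go side's strconv.ParseInt: base-10 digits only, id > 0.
      if (!/^\d+$/.test(trimmed)) {
        throw new Error(`invalid proxy id: ${trimmed}`)
      }
      const id = Number(trimmed)
      if (id <= 0) {
        throw new Error(`invalid proxy id: ${trimmed}`)
      }
      ids.push(id)
    }
  }
  return ids
}
```

This round-trips with the frontend's exportData, which joins selected ids into a single comma-separated ids parameter via ids.join(',').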
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 3c407080..0a267f06 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1903,6 +1903,7 @@ export default { editProxy: 'Edit Proxy', deleteProxy: 'Delete Proxy', dataImport: 'Import', + dataExportSelected: 'Export Selected', dataImportTitle: 'Import Proxies', dataImportHint: 'Upload the exported proxy JSON file to import proxies in bulk.', dataImportWarning: 'Import will create or reuse proxies, keep their status, and trigger latency checks after completion.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 9ff89dfc..e1a70054 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -2012,6 +2012,7 @@ export default { deleteConfirmMessage: "确定要删除代理 '{name}' 吗?", testProxy: '测试代理', dataImport: '导入', + dataExportSelected: '导出选中', dataImportTitle: '导入代理', dataImportHint: '上传代理导出的 JSON 文件以批量导入代理。', dataImportWarning: '导入将创建或复用代理,保留状态并在完成后自动触发延迟检测。', diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue index e6fe25c8..d19e010e 100644 --- a/frontend/src/views/admin/AccountsView.vue +++ b/frontend/src/views/admin/AccountsView.vue @@ -96,7 +96,7 @@ -