From 2c35f0276f4cb57151f36ade7e5e2cba186c2551 Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 31 Dec 2025 20:46:54 +0800 Subject: [PATCH 01/51] =?UTF-8?q?fix(frontend):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E6=97=A0=E9=99=90=E5=88=B6=E8=AE=A2=E9=98=85=E7=9A=84=E6=98=BE?= =?UTF-8?q?=E7=A4=BA=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../common/SubscriptionProgressMini.vue | 188 ++++++++++-------- frontend/src/i18n/locales/en.ts | 5 +- frontend/src/i18n/locales/zh.ts | 5 +- .../src/views/admin/SubscriptionsView.vue | 9 +- frontend/src/views/user/SubscriptionsView.vue | 18 +- 5 files changed, 134 insertions(+), 91 deletions(-) diff --git a/frontend/src/components/common/SubscriptionProgressMini.vue b/frontend/src/components/common/SubscriptionProgressMini.vue index b84175e9..92198c2c 100644 --- a/frontend/src/components/common/SubscriptionProgressMini.vue +++ b/frontend/src/components/common/SubscriptionProgressMini.vue @@ -69,94 +69,108 @@ - +
-
- {{ - t('subscriptionProgress.daily') - }} -
-
-
- - {{ - formatUsage(subscription.daily_usage_usd, subscription.group?.daily_limit_usd) - }} + +
+ + + {{ t('subscriptionProgress.unlimited') }}
-
- {{ - t('subscriptionProgress.weekly') - }} -
-
+ +
@@ -215,7 +229,19 @@ function getMaxUsagePercentage(sub: UserSubscription): number { return percentages.length > 0 ? Math.max(...percentages) : 0 } +function isUnlimited(sub: UserSubscription): boolean { + return ( + !sub.group?.daily_limit_usd && + !sub.group?.weekly_limit_usd && + !sub.group?.monthly_limit_usd + ) +} + function getProgressDotClass(sub: UserSubscription): string { + // Unlimited subscriptions get a special color + if (isUnlimited(sub)) { + return 'bg-emerald-500' + } const maxPercentage = getMaxUsagePercentage(sub) if (maxPercentage >= 90) return 'bg-red-500' if (maxPercentage >= 70) return 'bg-orange-500' diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index d153b553..6d1095cf 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -749,6 +749,7 @@ export default { weekly: 'Weekly', monthly: 'Monthly', noLimits: 'No limits configured', + unlimited: 'Unlimited', resetNow: 'Resetting soon', windowNotActive: 'Window not active', resetInMinutes: 'Resets in {minutes}m', @@ -1492,7 +1493,8 @@ export default { expiresToday: 'Expires today', expiresTomorrow: 'Expires tomorrow', viewAll: 'View all subscriptions', - noSubscriptions: 'No active subscriptions' + noSubscriptions: 'No active subscriptions', + unlimited: 'Unlimited' }, // Version Badge @@ -1535,6 +1537,7 @@ export default { expires: 'Expires', noExpiration: 'No expiration', unlimited: 'Unlimited', + unlimitedDesc: 'No usage limits on this subscription', daily: 'Daily', weekly: 'Weekly', monthly: 'Monthly', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index c6105683..97d57051 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -840,6 +840,7 @@ export default { weekly: '每周', monthly: '每月', noLimits: '未配置限额', + unlimited: '无限制', resetNow: '即将重置', windowNotActive: '窗口未激活', resetInMinutes: '{minutes} 分钟后重置', @@ -1689,7 +1690,8 @@ export default { expiresToday: '今天到期', expiresTomorrow: '明天到期', viewAll: '查看全部订阅', - noSubscriptions: '暂无有效订阅' + noSubscriptions: '暂无有效订阅', + unlimited: '无限制' }, // Version Badge @@ -1731,6 +1733,7 @@ export default { expires: '到期时间', noExpiration: '无到期时间', unlimited: '无限制', + unlimitedDesc: '该订阅无用量限制', daily: '每日', weekly: '每周', monthly: '每月', diff --git a/frontend/src/views/admin/SubscriptionsView.vue b/frontend/src/views/admin/SubscriptionsView.vue index bd6a17eb..679c3275 100644 --- a/frontend/src/views/admin/SubscriptionsView.vue +++ b/frontend/src/views/admin/SubscriptionsView.vue @@ -202,16 +202,19 @@
- +
- {{ t('admin.subscriptions.noLimits') }} + + + {{ t('admin.subscriptions.unlimited') }} +
diff --git a/frontend/src/views/user/SubscriptionsView.vue b/frontend/src/views/user/SubscriptionsView.vue index dc93a9c1..b03b665a 100644 --- a/frontend/src/views/user/SubscriptionsView.vue +++ b/frontend/src/views/user/SubscriptionsView.vue @@ -230,18 +230,26 @@

- +
- {{ - t('userSubscriptions.unlimited') - }} +
+ +
+

+ {{ t('userSubscriptions.unlimited') }} +

+

+ {{ t('userSubscriptions.unlimitedDesc') }} +

+
+
From 15e676e9cd7250880e56e092976e8832abf82a67 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Wed, 31 Dec 2025 20:56:38 +0800 Subject: [PATCH 02/51] =?UTF-8?q?fix(upstream):=20=E6=94=AF=E6=8C=81=20Cla?= =?UTF-8?q?ude=20custom=20=E7=B1=BB=E5=9E=8B=E5=B7=A5=E5=85=B7=20(MCP)=20?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - ClaudeTool 结构体增加 Type 和 Custom 字段 - buildTools 函数支持从 custom 字段读取 input_schema - convertClaudeToolsToGeminiTools 函数支持 MCP 工具格式 - 修复 Antigravity upstream error 400: JSON schema invalid 修复 Issue 0.2: tools.X.custom.input_schema 验证错误 --- .../internal/pkg/antigravity/claude_types.go | 13 +++++++++- .../pkg/antigravity/request_transformer.go | 18 +++++++++++-- .../service/gemini_messages_compat_service.go | 26 ++++++++++++++++--- 3 files changed, 51 insertions(+), 6 deletions(-) diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index 9cab4cea..f394d7e3 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -37,8 +37,19 @@ type ClaudeMetadata struct { } // ClaudeTool Claude 工具定义 +// 支持两种格式: +// 1. 标准格式: { "name": "...", "description": "...", "input_schema": {...} } +// 2. Custom 格式 (MCP): { "type": "custom", "name": "...", "custom": { "description": "...", "input_schema": {...} } } type ClaudeTool struct { - Name string `json:"name"` + Type string `json:"type,omitempty"` // "custom" 或空(标准格式) + Name string `json:"name"` + Description string `json:"description,omitempty"` // 标准格式使用 + InputSchema map[string]any `json:"input_schema,omitempty"` // 标准格式使用 + Custom *CustomToolSpec `json:"custom,omitempty"` // custom 格式使用 +} + +// CustomToolSpec MCP custom 工具规格 +type CustomToolSpec struct { Description string `json:"description,omitempty"` InputSchema map[string]any `json:"input_schema"` } diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 2ff0ec02..51eb4299 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -379,12 +379,26 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 普通工具 var funcDecls []GeminiFunctionDecl for _, tool := range tools { + var description string + var inputSchema map[string]any + + // 检查是否为 custom 类型工具 (MCP) + if tool.Type == "custom" && tool.Custom != nil { + // Custom 格式: 从 custom 字段获取 description 和 input_schema + description = tool.Custom.Description + inputSchema = tool.Custom.InputSchema + } else { + // 标准格式: 从顶层字段获取 + description = tool.Description + inputSchema = tool.InputSchema + } + // 清理 JSON Schema - params := cleanJSONSchema(tool.InputSchema) + params := cleanJSONSchema(inputSchema) funcDecls = append(funcDecls, GeminiFunctionDecl{ Name: tool.Name, - Description: tool.Description, + Description: description, Parameters: params, }) } diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index ee3ade16..e55d798a 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2245,12 +2245,32 @@ func convertClaudeToolsToGeminiTools(tools any) []any { if !ok { continue } - name, _ := tm["name"].(string) - desc, _ := tm["description"].(string) - params := tm["input_schema"] + + var name, desc string + var params any + + // 检查是否为 custom 类型工具 (MCP) + toolType, _ := tm["type"].(string) + if toolType == "custom" { + // Custom 格式: 从 custom 字段获取 description 和 input_schema + custom, ok := tm["custom"].(map[string]any) + if !ok { + continue + } + name, _ = tm["name"].(string) + desc, _ = custom["description"].(string) + params = custom["input_schema"] + } else { + // 标准格式: 从顶层字段获取 + name, _ = tm["name"].(string) + desc, _ = tm["description"].(string) + params = tm["input_schema"] + } + if name == "" { continue } + funcDecls = append(funcDecls, map[string]any{ "name": name, "description": desc, From 0b6371174e18ab03848b1358566a85694f46a2eb Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 31 Dec 2025 21:05:33 +0800 Subject: [PATCH 03/51] =?UTF-8?q?fix(settings):=20=E4=BF=9D=E5=AD=98=20Tur?= =?UTF-8?q?nstile=20=E8=AE=BE=E7=BD=AE=E6=97=B6=E9=AA=8C=E8=AF=81=E5=8F=82?= =?UTF-8?q?=E6=95=B0=E6=9C=89=E6=95=88=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/wire_gen.go | 2 +- .../internal/handler/admin/setting_handler.go | 42 ++++++++++++++++--- backend/internal/server/api_contract_test.go | 2 +- backend/internal/service/turnstile_service.go | 20 +++++++++ 4 files changed, 59 insertions(+), 7 deletions(-) diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index d469dcbb..c4859383 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -109,7 +109,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService) proxyHandler := admin.NewProxyHandler(adminService) adminRedeemHandler := admin.NewRedeemHandler(adminService) - settingHandler := admin.NewSettingHandler(settingService, emailService) + settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService) updateCache := repository.NewUpdateCache(redisClient) gitHubReleaseClient := repository.NewGitHubReleaseClient() serviceBuildInfo := provideServiceBuildInfo(buildInfo) diff --git a/backend/internal/handler/admin/setting_handler.go b/backend/internal/handler/admin/setting_handler.go index 14b569de..e533aef1 100644 --- a/backend/internal/handler/admin/setting_handler.go +++ b/backend/internal/handler/admin/setting_handler.go @@ -10,15 +10,17 @@ import ( // SettingHandler 系统设置处理器 type SettingHandler struct { - settingService *service.SettingService - emailService *service.EmailService + settingService *service.SettingService + emailService *service.EmailService + turnstileService *service.TurnstileService } // NewSettingHandler 创建系统设置处理器 -func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService) *SettingHandler { +func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService) *SettingHandler { return &SettingHandler{ - settingService: settingService, - emailService: emailService, + settingService: settingService, + emailService: emailService, + turnstileService: turnstileService, } } @@ -108,6 +110,36 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { req.SmtpPort = 587 } + // Turnstile 参数验证 + if req.TurnstileEnabled { + // 检查必填字段 + if req.TurnstileSiteKey == "" { + response.BadRequest(c, "Turnstile Site Key is required when enabled") + return + } + if req.TurnstileSecretKey == "" { + response.BadRequest(c, "Turnstile Secret Key is required when enabled") + return + } + + // 获取当前设置,检查参数是否有变化 + currentSettings, err := h.settingService.GetAllSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // 当 site_key 或 secret_key 任一变化时验证(避免配置错误导致无法登录) + siteKeyChanged := currentSettings.TurnstileSiteKey != req.TurnstileSiteKey + secretKeyChanged := currentSettings.TurnstileSecretKey != req.TurnstileSecretKey + if siteKeyChanged || secretKeyChanged { + if err := h.turnstileService.ValidateSecretKey(c.Request.Context(), req.TurnstileSecretKey); err != nil { + response.ErrorFrom(c, err) + return + } + } + } + settings := &service.SystemSettings{ RegistrationEnabled: req.RegistrationEnabled, EmailVerifyEnabled: req.EmailVerifyEnabled, diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 5a243bfc..3912c8fb 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -385,7 +385,7 @@ func newContractDeps(t *testing.T) *contractDeps { authHandler := handler.NewAuthHandler(cfg, nil, userService) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) - adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil) + adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil) jwtAuth := func(c *gin.Context) { c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ diff --git a/backend/internal/service/turnstile_service.go b/backend/internal/service/turnstile_service.go index 2a68c11b..cfb87c57 100644 --- a/backend/internal/service/turnstile_service.go +++ b/backend/internal/service/turnstile_service.go @@ -11,6 +11,7 @@ import ( var ( ErrTurnstileVerificationFailed = infraerrors.BadRequest("TURNSTILE_VERIFICATION_FAILED", "turnstile verification failed") ErrTurnstileNotConfigured = infraerrors.ServiceUnavailable("TURNSTILE_NOT_CONFIGURED", "turnstile not configured") + ErrTurnstileInvalidSecretKey = infraerrors.BadRequest("TURNSTILE_INVALID_SECRET_KEY", "invalid turnstile secret key") ) // TurnstileVerifier 验证 Turnstile token 的接口 @@ -83,3 +84,22 @@ func (s *TurnstileService) VerifyToken(ctx context.Context, token string, remote func (s *TurnstileService) IsEnabled(ctx context.Context) bool { return s.settingService.IsTurnstileEnabled(ctx) } + +// ValidateSecretKey 验证 Turnstile Secret Key 是否有效 +func (s *TurnstileService) ValidateSecretKey(ctx context.Context, secretKey string) error { + // 发送一个测试token的验证请求来检查secret_key是否有效 + result, err := s.verifier.VerifyToken(ctx, secretKey, "test-validation", "") + if err != nil { + return fmt.Errorf("validate secret key: %w", err) + } + + // 检查是否有 invalid-input-secret 错误 + for _, code := range result.ErrorCodes { + if code == "invalid-input-secret" { + return ErrTurnstileInvalidSecretKey + } + } + + // 其他错误(如 invalid-input-response)说明 secret key 是有效的 + return nil +} From 35b768b71967740d30b9923fe7418f7eab7a4977 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Wed, 31 Dec 2025 21:35:41 +0800 Subject: [PATCH 04/51] =?UTF-8?q?fix(upstream):=20=E8=B7=B3=E8=BF=87=20Cla?= =?UTF-8?q?ude=20=E6=A8=A1=E5=9E=8B=E6=97=A0=20signature=20=E7=9A=84=20thi?= =?UTF-8?q?nking=20block?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - buildParts 函数检测 thinking block 的 signature - Claude 模型 (allowDummyThought=false) 时跳过无 signature 的 block - 记录警告日志以便调试 - Gemini 模型继续使用 dummy signature 兼容方案 修复 Issue 0.1: Claude thinking block signature 缺失错误 --- backend/internal/pkg/antigravity/request_transformer.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 51eb4299..e5ab8ece 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -3,6 +3,7 @@ package antigravity import ( "encoding/json" "fmt" + "log" "strings" "github.com/google/uuid" @@ -205,6 +206,10 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu // 保留原有 signature(Claude 模型需要有效的 signature) if block.Signature != "" { part.ThoughtSignature = block.Signature + } else if !allowDummyThought { + // Claude 模型需要有效 signature,跳过无 signature 的 thinking block + log.Printf("Warning: skipping thinking block without signature for Claude model") + continue } parts = append(parts, part) From c1e25b7ecf745a97832b9a1cc8827d6e6123dc69 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Wed, 31 Dec 2025 21:44:56 +0800 Subject: [PATCH 05/51] =?UTF-8?q?fix(upstream):=20=E5=AE=8C=E5=96=84?= =?UTF-8?q?=E8=BE=B9=E7=95=8C=E6=A3=80=E6=9F=A5=E5=92=8C=20thinking=20bloc?= =?UTF-8?q?k=20=E5=A4=84=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 基于 Gemini + Codex 审查结果的修复: 1. thinking block dummy signature 填充 - Gemini 模型现在会填充 dummyThoughtSignature - 与 tool_use 处理逻辑保持一致 2. 边界检查增强 - buildTools: 跳过空工具名称 - buildTools: 为 nil schema 提供默认值 - convertClaudeToolsToGeminiTools: 为 nil params 提供默认值 3. 防止下游 API 验证错误 - 确保所有工具都有有效的 parameters - 默认 schema: {type: 'object', properties: {}} 审查报告:Gemini 评分 95%, Codex 评分 8.2/10 --- .../pkg/antigravity/request_transformer.go | 17 +++++++++++++++++ .../service/gemini_messages_compat_service.go | 8 ++++++++ 2 files changed, 25 insertions(+) diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index e5ab8ece..e0b5b886 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -210,6 +210,9 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu // Claude 模型需要有效 signature,跳过无 signature 的 thinking block log.Printf("Warning: skipping thinking block without signature for Claude model") continue + } else { + // Gemini 模型使用 dummy signature + part.ThoughtSignature = dummyThoughtSignature } parts = append(parts, part) @@ -384,6 +387,12 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 普通工具 var funcDecls []GeminiFunctionDecl for _, tool := range tools { + // 跳过无效工具名称 + if tool.Name == "" { + log.Printf("Warning: skipping tool with empty name") + continue + } + var description string var inputSchema map[string]any @@ -401,6 +410,14 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 清理 JSON Schema params := cleanJSONSchema(inputSchema) + // 为 nil schema 提供默认值 + if params == nil { + params = map[string]any{ + "type": "OBJECT", + "properties": map[string]any{}, + } + } + funcDecls = append(funcDecls, GeminiFunctionDecl{ Name: tool.Name, Description: description, diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index e55d798a..a0bf1b6a 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2271,6 +2271,14 @@ func convertClaudeToolsToGeminiTools(tools any) []any { continue } + // 为 nil params 提供默认值 + if params == nil { + params = map[string]any{ + "type": "object", + "properties": map[string]any{}, + } + } + funcDecls = append(funcDecls, map[string]any{ "name": name, "description": desc, From c5b792add579e3d837d5699928ca938e64346a08 Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 31 Dec 2025 22:48:35 +0800 Subject: [PATCH 06/51] =?UTF-8?q?fix(billing):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E9=99=90=E9=A2=9D=E4=B8=BA0=E6=97=B6=E6=B6=88=E8=B4=B9?= =?UTF-8?q?=E8=AE=B0=E5=BD=95=E5=A4=B1=E8=B4=A5=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 添加 normalizeLimit 函数,将 0 或负数限额规范化为 nil(无限制) - 简化 IncrementUsage,移除冗余的配额检查逻辑 - 配额检查已在请求前由中间件和网关完成 - 消费记录应无条件执行,确保数据完整性 - 删除测试限额超出行为的无效集成测试 --- .../repository/user_subscription_repo.go | 74 +--------- ...user_subscription_repo_integration_test.go | 137 +----------------- backend/internal/service/admin_service.go | 27 +++- .../internal/service/subscription_service.go | 1 + 4 files changed, 31 insertions(+), 208 deletions(-) diff --git a/backend/internal/repository/user_subscription_repo.go b/backend/internal/repository/user_subscription_repo.go index 2b308674..cd3b9db6 100644 --- a/backend/internal/repository/user_subscription_repo.go +++ b/backend/internal/repository/user_subscription_repo.go @@ -291,13 +291,11 @@ func (r *userSubscriptionRepository) ResetMonthlyUsage(ctx context.Context, id i return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } -// IncrementUsage 原子性地累加用量并校验限额。 -// 使用单条 SQL 语句同时检查 Group 的限额,如果任一限额即将超出则拒绝更新。 -// 当更新失败时,会执行额外查询确定具体超出的限额类型。 +// IncrementUsage 原子性地累加订阅用量。 +// 限额检查已在请求前由 BillingCacheService.CheckBillingEligibility 完成, +// 此处仅负责记录实际消费,确保消费数据的完整性。 func (r *userSubscriptionRepository) IncrementUsage(ctx context.Context, id int64, costUSD float64) error { - // 使用 JOIN 的原子更新:只有当所有限额条件满足时才执行累加 - // NULL 限额表示无限制 - const atomicUpdateSQL = ` + const updateSQL = ` UPDATE user_subscriptions us SET daily_usage_usd = us.daily_usage_usd + $1, @@ -309,13 +307,10 @@ func (r *userSubscriptionRepository) IncrementUsage(ctx context.Context, id int6 AND us.deleted_at IS NULL AND us.group_id = g.id AND g.deleted_at IS NULL - AND (g.daily_limit_usd IS NULL OR us.daily_usage_usd + $1 <= g.daily_limit_usd) - AND (g.weekly_limit_usd IS NULL OR us.weekly_usage_usd + $1 <= g.weekly_limit_usd) - AND (g.monthly_limit_usd IS NULL OR us.monthly_usage_usd + $1 <= g.monthly_limit_usd) ` client := clientFromContext(ctx, r.client) - result, err := client.ExecContext(ctx, atomicUpdateSQL, costUSD, id) + result, err := client.ExecContext(ctx, updateSQL, costUSD, id) if err != nil { return err } @@ -326,64 +321,11 @@ func (r *userSubscriptionRepository) IncrementUsage(ctx context.Context, id int6 } if affected > 0 { - return nil // 更新成功 + return nil } - // affected == 0:可能是订阅不存在、分组已删除、或限额超出 - // 执行额外查询确定具体原因 - return r.checkIncrementFailureReason(ctx, id, costUSD) -} - -// checkIncrementFailureReason 查询更新失败的具体原因 -func (r *userSubscriptionRepository) checkIncrementFailureReason(ctx context.Context, id int64, costUSD float64) error { - const checkSQL = ` - SELECT - CASE WHEN us.deleted_at IS NOT NULL THEN 'subscription_deleted' - WHEN g.id IS NULL THEN 'subscription_not_found' - WHEN g.deleted_at IS NOT NULL THEN 'group_deleted' - WHEN g.daily_limit_usd IS NOT NULL AND us.daily_usage_usd + $1 > g.daily_limit_usd THEN 'daily_exceeded' - WHEN g.weekly_limit_usd IS NOT NULL AND us.weekly_usage_usd + $1 > g.weekly_limit_usd THEN 'weekly_exceeded' - WHEN g.monthly_limit_usd IS NOT NULL AND us.monthly_usage_usd + $1 > g.monthly_limit_usd THEN 'monthly_exceeded' - ELSE 'unknown' - END AS reason - FROM user_subscriptions us - LEFT JOIN groups g ON us.group_id = g.id - WHERE us.id = $2 - ` - - client := clientFromContext(ctx, r.client) - rows, err := client.QueryContext(ctx, checkSQL, costUSD, id) - if err != nil { - return err - } - defer func() { _ = rows.Close() }() - - if !rows.Next() { - return service.ErrSubscriptionNotFound - } - - var reason string - if err := rows.Scan(&reason); err != nil { - return err - } - - if err := rows.Err(); err != nil { - return err - } - - switch reason { - case "subscription_not_found", "subscription_deleted", "group_deleted": - return service.ErrSubscriptionNotFound - case "daily_exceeded": - return service.ErrDailyLimitExceeded - case "weekly_exceeded": - return service.ErrWeeklyLimitExceeded - case "monthly_exceeded": - return service.ErrMonthlyLimitExceeded - default: - // unknown 情况理论上不应发生,但作为兜底返回 - return service.ErrSubscriptionNotFound - } + // affected == 0:订阅不存在或已删除 + return service.ErrSubscriptionNotFound } func (r *userSubscriptionRepository) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) { diff --git a/backend/internal/repository/user_subscription_repo_integration_test.go b/backend/internal/repository/user_subscription_repo_integration_test.go index 3a6c6434..2099e5d8 100644 --- a/backend/internal/repository/user_subscription_repo_integration_test.go +++ b/backend/internal/repository/user_subscription_repo_integration_test.go @@ -633,112 +633,7 @@ func (s *UserSubscriptionRepoSuite) TestActiveExpiredBoundaries_UsageAndReset_Ba s.Require().Equal(service.SubscriptionStatusExpired, updated.Status, "expected status expired") } -// --- 限额检查与软删除过滤测试 --- - -func (s *UserSubscriptionRepoSuite) mustCreateGroupWithLimits(name string, daily, weekly, monthly *float64) *service.Group { - s.T().Helper() - - create := s.client.Group.Create(). - SetName(name). - SetStatus(service.StatusActive). - SetSubscriptionType(service.SubscriptionTypeSubscription) - - if daily != nil { - create.SetDailyLimitUsd(*daily) - } - if weekly != nil { - create.SetWeeklyLimitUsd(*weekly) - } - if monthly != nil { - create.SetMonthlyLimitUsd(*monthly) - } - - g, err := create.Save(s.ctx) - s.Require().NoError(err, "create group with limits") - return groupEntityToService(g) -} - -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_DailyLimitExceeded() { - user := s.mustCreateUser("dailylimit@test.com", service.RoleUser) - dailyLimit := 10.0 - group := s.mustCreateGroupWithLimits("g-dailylimit", &dailyLimit, nil, nil) - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 先增加 9.0,应该成功 - err := s.repo.IncrementUsage(s.ctx, sub.ID, 9.0) - s.Require().NoError(err, "first increment should succeed") - - // 再增加 2.0,会超过 10.0 限额,应该失败 - err = s.repo.IncrementUsage(s.ctx, sub.ID, 2.0) - s.Require().Error(err, "should fail when daily limit exceeded") - s.Require().ErrorIs(err, service.ErrDailyLimitExceeded) - - // 验证用量没有变化 - got, err := s.repo.GetByID(s.ctx, sub.ID) - s.Require().NoError(err) - s.Require().InDelta(9.0, got.DailyUsageUSD, 1e-6, "usage should not change after failed increment") -} - -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_WeeklyLimitExceeded() { - user := s.mustCreateUser("weeklylimit@test.com", service.RoleUser) - weeklyLimit := 50.0 - group := s.mustCreateGroupWithLimits("g-weeklylimit", nil, &weeklyLimit, nil) - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 增加 45.0,应该成功 - err := s.repo.IncrementUsage(s.ctx, sub.ID, 45.0) - s.Require().NoError(err, "first increment should succeed") - - // 再增加 10.0,会超过 50.0 限额,应该失败 - err = s.repo.IncrementUsage(s.ctx, sub.ID, 10.0) - s.Require().Error(err, "should fail when weekly limit exceeded") - s.Require().ErrorIs(err, service.ErrWeeklyLimitExceeded) -} - -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_MonthlyLimitExceeded() { - user := s.mustCreateUser("monthlylimit@test.com", service.RoleUser) - monthlyLimit := 100.0 - group := s.mustCreateGroupWithLimits("g-monthlylimit", nil, nil, &monthlyLimit) - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 增加 90.0,应该成功 - err := s.repo.IncrementUsage(s.ctx, sub.ID, 90.0) - s.Require().NoError(err, "first increment should succeed") - - // 再增加 20.0,会超过 100.0 限额,应该失败 - err = s.repo.IncrementUsage(s.ctx, sub.ID, 20.0) - s.Require().Error(err, "should fail when monthly limit exceeded") - s.Require().ErrorIs(err, service.ErrMonthlyLimitExceeded) -} - -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_NoLimits() { - user := s.mustCreateUser("nolimits@test.com", service.RoleUser) - group := s.mustCreateGroupWithLimits("g-nolimits", nil, nil, nil) // 无限额 - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 应该可以增加任意金额 - err := s.repo.IncrementUsage(s.ctx, sub.ID, 1000000.0) - s.Require().NoError(err, "should succeed without limits") - - got, err := s.repo.GetByID(s.ctx, sub.ID) - s.Require().NoError(err) - s.Require().InDelta(1000000.0, got.DailyUsageUSD, 1e-6) -} - -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_AtExactLimit() { - user := s.mustCreateUser("exactlimit@test.com", service.RoleUser) - dailyLimit := 10.0 - group := s.mustCreateGroupWithLimits("g-exactlimit", &dailyLimit, nil, nil) - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 正好达到限额应该成功 - err := s.repo.IncrementUsage(s.ctx, sub.ID, 10.0) - s.Require().NoError(err, "should succeed at exact limit") - - got, err := s.repo.GetByID(s.ctx, sub.ID) - s.Require().NoError(err) - s.Require().InDelta(10.0, got.DailyUsageUSD, 1e-6) -} +// --- 软删除过滤测试 --- func (s *UserSubscriptionRepoSuite) TestIncrementUsage_SoftDeletedGroup() { user := s.mustCreateUser("softdeleted@test.com", service.RoleUser) @@ -779,7 +674,7 @@ func (s *UserSubscriptionRepoSuite) TestUpdate_NilInput() { func (s *UserSubscriptionRepoSuite) TestIncrementUsage_Concurrent() { user := s.mustCreateUser("concurrent@test.com", service.RoleUser) - group := s.mustCreateGroupWithLimits("g-concurrent", nil, nil, nil) // 无限额 + group := s.mustCreateGroup("g-concurrent") sub := s.mustCreateSubscription(user.ID, group.ID, nil) const numGoroutines = 10 @@ -808,34 +703,6 @@ func (s *UserSubscriptionRepoSuite) TestIncrementUsage_Concurrent() { s.Require().InDelta(expectedUsage, got.MonthlyUsageUSD, 1e-6, "monthly usage should be correctly accumulated") } -func (s *UserSubscriptionRepoSuite) TestIncrementUsage_ConcurrentWithLimit() { - user := s.mustCreateUser("concurrentlimit@test.com", service.RoleUser) - dailyLimit := 5.0 - group := s.mustCreateGroupWithLimits("g-concurrentlimit", &dailyLimit, nil, nil) - sub := s.mustCreateSubscription(user.ID, group.ID, nil) - - // 注意:事务内的操作是串行的,所以这里改为顺序执行以验证限额逻辑 - // 尝试增加 10 次,每次 1.0,但限额只有 5.0 - const numAttempts = 10 - const incrementPerAttempt = 1.0 - - successCount := 0 - for i := 0; i < numAttempts; i++ { - err := s.repo.IncrementUsage(s.ctx, sub.ID, incrementPerAttempt) - if err == nil { - successCount++ - } - } - - // 验证:应该有 5 次成功(不超过限额),5 次失败(超出限额) - s.Require().Equal(5, successCount, "exactly 5 increments should succeed (limit=5, increment=1)") - - // 验证最终用量等于限额 - got, err := s.repo.GetByID(s.ctx, sub.ID) - s.Require().NoError(err) - s.Require().InDelta(dailyLimit, got.DailyUsageUSD, 1e-6, "daily usage should equal limit") -} - func (s *UserSubscriptionRepoSuite) TestTxContext_RollbackIsolation() { baseClient := testEntClient(s.T()) tx, err := baseClient.Tx(context.Background()) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 4be09810..feeb19a0 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -488,6 +488,11 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn subscriptionType = SubscriptionTypeStandard } + // 限额字段:0 和 nil 都表示"无限制" + dailyLimit := normalizeLimit(input.DailyLimitUSD) + weeklyLimit := normalizeLimit(input.WeeklyLimitUSD) + monthlyLimit := normalizeLimit(input.MonthlyLimitUSD) + group := &Group{ Name: input.Name, Description: input.Description, @@ -496,9 +501,9 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn IsExclusive: input.IsExclusive, Status: StatusActive, SubscriptionType: subscriptionType, - DailyLimitUSD: input.DailyLimitUSD, - WeeklyLimitUSD: input.WeeklyLimitUSD, - MonthlyLimitUSD: input.MonthlyLimitUSD, + DailyLimitUSD: dailyLimit, + WeeklyLimitUSD: weeklyLimit, + MonthlyLimitUSD: monthlyLimit, } if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err @@ -506,6 +511,14 @@ func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn return group, nil } +// normalizeLimit 将 0 或负数转换为 nil(表示无限制) +func normalizeLimit(limit *float64) *float64 { + if limit == nil || *limit <= 0 { + return nil + } + return limit +} + func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *UpdateGroupInput) (*Group, error) { group, err := s.groupRepo.GetByID(ctx, id) if err != nil { @@ -535,15 +548,15 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd if input.SubscriptionType != "" { group.SubscriptionType = input.SubscriptionType } - // 限额字段支持设置为nil(清除限额)或具体值 + // 限额字段:0 和 nil 都表示"无限制",正数表示具体限额 if input.DailyLimitUSD != nil { - group.DailyLimitUSD = input.DailyLimitUSD + group.DailyLimitUSD = normalizeLimit(input.DailyLimitUSD) } if input.WeeklyLimitUSD != nil { - group.WeeklyLimitUSD = input.WeeklyLimitUSD + group.WeeklyLimitUSD = normalizeLimit(input.WeeklyLimitUSD) } if input.MonthlyLimitUSD != nil { - group.MonthlyLimitUSD = input.MonthlyLimitUSD + group.MonthlyLimitUSD = normalizeLimit(input.MonthlyLimitUSD) } if err := s.groupRepo.Update(ctx, group); err != nil { diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go index 09554c0f..f6aefb83 100644 --- a/backend/internal/service/subscription_service.go +++ b/backend/internal/service/subscription_service.go @@ -490,6 +490,7 @@ func (s *SubscriptionService) CheckAndResetWindows(ctx context.Context, sub *Use } // CheckUsageLimits 检查使用限额(返回错误如果超限) +// 用于中间件的快速预检查,additionalCost 通常为 0 func (s *SubscriptionService) CheckUsageLimits(ctx context.Context, sub *UserSubscription, group *Group, additionalCost float64) error { if !sub.CheckDailyLimit(group, additionalCost) { return ErrDailyLimitExceeded From bb7ade265da1da38154e1e44e57d060db7eb2c8e Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 31 Dec 2025 23:37:51 +0800 Subject: [PATCH 07/51] =?UTF-8?q?chore(token-refresh):=20=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=20Antigravity=20Token=20=E5=88=B7=E6=96=B0=E8=B0=83?= =?UTF-8?q?=E8=AF=95=E6=97=A5=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - NeedsRefresh 判断为 true 时输出 expires_at、time_until_expiry、window - 修正注释中的刷新窗口描述(10分钟 → 15分钟) --- .../internal/service/antigravity_token_refresher.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/backend/internal/service/antigravity_token_refresher.go b/backend/internal/service/antigravity_token_refresher.go index b4739025..9dd4463f 100644 --- a/backend/internal/service/antigravity_token_refresher.go +++ b/backend/internal/service/antigravity_token_refresher.go @@ -2,6 +2,7 @@ package service import ( "context" + "fmt" "time" ) @@ -28,7 +29,7 @@ func (r *AntigravityTokenRefresher) CanRefresh(account *Account) bool { } // NeedsRefresh 检查账户是否需要刷新 -// Antigravity 使用固定的10分钟刷新窗口,忽略全局配置 +// Antigravity 使用固定的15分钟刷新窗口,忽略全局配置 func (r *AntigravityTokenRefresher) NeedsRefresh(account *Account, _ time.Duration) bool { if !r.CanRefresh(account) { return false @@ -37,7 +38,13 @@ func (r *AntigravityTokenRefresher) NeedsRefresh(account *Account, _ time.Durati if expiresAt == nil { return false } - return time.Until(*expiresAt) < antigravityRefreshWindow + timeUntilExpiry := time.Until(*expiresAt) + needsRefresh := timeUntilExpiry < antigravityRefreshWindow + if needsRefresh { + fmt.Printf("[AntigravityTokenRefresher] Account %d needs refresh: expires_at=%s, time_until_expiry=%v, window=%v\n", + account.ID, expiresAt.Format("2006-01-02 15:04:05"), timeUntilExpiry, antigravityRefreshWindow) + } + return needsRefresh } // Refresh 执行 token 刷新 From 2270a54ff6d9373b84082f20ac233f8fb419a563 Mon Sep 17 00:00:00 2001 From: NepetaLemon Date: Wed, 31 Dec 2025 23:42:01 +0800 Subject: [PATCH 08/51] =?UTF-8?q?refactor:=20=E7=A7=BB=E9=99=A4=20infrastr?= =?UTF-8?q?ucture=20=E7=9B=AE=E5=BD=95=20(#108)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: 迁移初始化 db 和 redis 到 repository * refactor: 迁移 errors 到 pkg --- backend/cmd/server/wire.go | 2 - backend/cmd/server/wire_gen.go | 7 +- backend/internal/infrastructure/wire.go | 79 ------------------- .../{infrastructure => pkg}/errors/errors.go | 0 .../errors/errors_test.go | 0 .../{infrastructure => pkg}/errors/http.go | 0 .../{infrastructure => pkg}/errors/types.go | 0 backend/internal/pkg/response/response.go | 2 +- .../internal/pkg/response/response_test.go | 14 ++-- .../{infrastructure => repository}/db_pool.go | 2 +- .../db_pool_test.go | 2 +- .../{infrastructure => repository}/ent.go | 2 +- .../internal/repository/error_translate.go | 2 +- .../repository/integration_harness_test.go | 3 +- .../migrations_runner.go | 2 +- .../migrations_schema_integration_test.go | 3 +- .../{infrastructure => repository}/redis.go | 2 +- .../redis_test.go | 2 +- backend/internal/repository/wire.go | 59 ++++++++++++++ .../internal/server/middleware/recovery.go | 2 +- .../server/middleware/recovery_test.go | 2 +- backend/internal/service/account_service.go | 2 +- backend/internal/service/api_key_service.go | 2 +- backend/internal/service/auth_service.go | 2 +- .../internal/service/billing_cache_service.go | 2 +- backend/internal/service/email_service.go | 2 +- backend/internal/service/group_service.go | 2 +- backend/internal/service/proxy_service.go | 2 +- backend/internal/service/redeem_service.go | 2 +- backend/internal/service/setting_service.go | 2 +- .../internal/service/subscription_service.go | 2 +- backend/internal/service/turnstile_service.go | 2 +- backend/internal/service/usage_service.go | 2 +- backend/internal/service/user_service.go | 2 +- backend/internal/setup/setup.go | 4 +- 35 files changed, 96 insertions(+), 121 deletions(-) delete mode 100644 backend/internal/infrastructure/wire.go rename backend/internal/{infrastructure => pkg}/errors/errors.go (100%) rename backend/internal/{infrastructure => pkg}/errors/errors_test.go (100%) rename backend/internal/{infrastructure => pkg}/errors/http.go (100%) rename backend/internal/{infrastructure => pkg}/errors/types.go (100%) rename backend/internal/{infrastructure => repository}/db_pool.go (97%) rename backend/internal/{infrastructure => repository}/db_pool_test.go (98%) rename backend/internal/{infrastructure => repository}/ent.go (99%) rename backend/internal/{infrastructure => repository}/migrations_runner.go (99%) rename backend/internal/{infrastructure => repository}/redis.go (98%) rename backend/internal/{infrastructure => repository}/redis_test.go (97%) diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index fffcd5f9..8596b8ba 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -12,7 +12,6 @@ import ( "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" - "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/Wei-Shaw/sub2api/internal/repository" "github.com/Wei-Shaw/sub2api/internal/server" "github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -31,7 +30,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { wire.Build( // Infrastructure layer ProviderSets config.ProviderSet, - infrastructure.ProviderSet, // Business layer ProviderSets repository.ProviderSet, diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index c4859383..83cba823 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -12,7 +12,6 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" "github.com/Wei-Shaw/sub2api/internal/handler/admin" - "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/Wei-Shaw/sub2api/internal/repository" "github.com/Wei-Shaw/sub2api/internal/server" "github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -35,18 +34,18 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { if err != nil { return nil, err } - client, err := infrastructure.ProvideEnt(configConfig) + client, err := repository.ProvideEnt(configConfig) if err != nil { return nil, err } - db, err := infrastructure.ProvideSQLDB(client) + db, err := repository.ProvideSQLDB(client) if err != nil { return nil, err } userRepository := repository.NewUserRepository(client, db) settingRepository := repository.NewSettingRepository(client) settingService := service.NewSettingService(settingRepository, configConfig) - redisClient := infrastructure.ProvideRedis(configConfig) + redisClient := repository.ProvideRedis(configConfig) emailCache := repository.NewEmailCache(redisClient) emailService := service.NewEmailService(settingRepository, emailCache) turnstileVerifier := repository.NewTurnstileVerifier() diff --git a/backend/internal/infrastructure/wire.go b/backend/internal/infrastructure/wire.go deleted file mode 100644 index 1e64640c..00000000 --- a/backend/internal/infrastructure/wire.go +++ /dev/null @@ -1,79 +0,0 @@ -package infrastructure - -import ( - "database/sql" - "errors" - - "github.com/Wei-Shaw/sub2api/ent" - "github.com/Wei-Shaw/sub2api/internal/config" - - "github.com/google/wire" - "github.com/redis/go-redis/v9" - - entsql "entgo.io/ent/dialect/sql" -) - -// ProviderSet 是基础设施层的 Wire 依赖提供者集合。 -// -// Wire 是 Google 开发的编译时依赖注入工具。ProviderSet 将相关的依赖提供函数 -// 组织在一起,便于在应用程序启动时自动组装依赖关系。 -// -// 包含的提供者: -// - ProvideEnt: 提供 Ent ORM 客户端 -// - ProvideSQLDB: 提供底层 SQL 数据库连接 -// - ProvideRedis: 提供 Redis 客户端 -var ProviderSet = wire.NewSet( - ProvideEnt, - ProvideSQLDB, - ProvideRedis, -) - -// ProvideEnt 为依赖注入提供 Ent 客户端。 -// -// 该函数是 InitEnt 的包装器,符合 Wire 的依赖提供函数签名要求。 -// Wire 会在编译时分析依赖关系,自动生成初始化代码。 -// -// 依赖:config.Config -// 提供:*ent.Client -func ProvideEnt(cfg *config.Config) (*ent.Client, error) { - client, _, err := InitEnt(cfg) - return client, err -} - -// ProvideSQLDB 从 Ent 客户端提取底层的 *sql.DB 连接。 -// -// 某些 Repository 需要直接执行原生 SQL(如复杂的批量更新、聚合查询), -// 此时需要访问底层的 sql.DB 而不是通过 Ent ORM。 -// -// 设计说明: -// - Ent 底层使用 sql.DB,通过 Driver 接口可以访问 -// - 这种设计允许在同一事务中混用 Ent 和原生 SQL -// -// 依赖:*ent.Client -// 提供:*sql.DB -func ProvideSQLDB(client *ent.Client) (*sql.DB, error) { - if client == nil { - return nil, errors.New("nil ent client") - } - // 从 Ent 客户端获取底层驱动 - drv, ok := client.Driver().(*entsql.Driver) - if !ok { - return nil, errors.New("ent driver does not expose *sql.DB") - } - // 返回驱动持有的 sql.DB 实例 - return drv.DB(), nil -} - -// ProvideRedis 为依赖注入提供 Redis 客户端。 -// -// Redis 用于: -// - 分布式锁(如并发控制) -// - 缓存(如用户会话、API 响应缓存) -// - 速率限制 -// - 实时统计数据 -// -// 依赖:config.Config -// 提供:*redis.Client -func ProvideRedis(cfg *config.Config) *redis.Client { - return InitRedis(cfg) -} diff --git a/backend/internal/infrastructure/errors/errors.go b/backend/internal/pkg/errors/errors.go similarity index 100% rename from backend/internal/infrastructure/errors/errors.go rename to backend/internal/pkg/errors/errors.go diff --git a/backend/internal/infrastructure/errors/errors_test.go b/backend/internal/pkg/errors/errors_test.go similarity index 100% rename from backend/internal/infrastructure/errors/errors_test.go rename to backend/internal/pkg/errors/errors_test.go diff --git a/backend/internal/infrastructure/errors/http.go b/backend/internal/pkg/errors/http.go similarity index 100% rename from backend/internal/infrastructure/errors/http.go rename to backend/internal/pkg/errors/http.go diff --git a/backend/internal/infrastructure/errors/types.go b/backend/internal/pkg/errors/types.go similarity index 100% rename from backend/internal/infrastructure/errors/types.go rename to backend/internal/pkg/errors/types.go diff --git a/backend/internal/pkg/response/response.go b/backend/internal/pkg/response/response.go index e26d2531..87dc4264 100644 --- a/backend/internal/pkg/response/response.go +++ b/backend/internal/pkg/response/response.go @@ -4,7 +4,7 @@ import ( "math" "net/http" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/gin-gonic/gin" ) diff --git a/backend/internal/pkg/response/response_test.go b/backend/internal/pkg/response/response_test.go index 13b184af..ef31ca3c 100644 --- a/backend/internal/pkg/response/response_test.go +++ b/backend/internal/pkg/response/response_test.go @@ -9,7 +9,7 @@ import ( "net/http/httptest" "testing" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + errors2 "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" ) @@ -82,7 +82,7 @@ func TestErrorFrom(t *testing.T) { }, { name: "application_error", - err: infraerrors.Forbidden("FORBIDDEN", "no access").WithMetadata(map[string]string{"scope": "admin"}), + err: errors2.Forbidden("FORBIDDEN", "no access").WithMetadata(map[string]string{"scope": "admin"}), wantWritten: true, wantHTTPCode: http.StatusForbidden, wantBody: Response{ @@ -94,7 +94,7 @@ func TestErrorFrom(t *testing.T) { }, { name: "bad_request_error", - err: infraerrors.BadRequest("INVALID_REQUEST", "invalid request"), + err: errors2.BadRequest("INVALID_REQUEST", "invalid request"), wantWritten: true, wantHTTPCode: http.StatusBadRequest, wantBody: Response{ @@ -105,7 +105,7 @@ func TestErrorFrom(t *testing.T) { }, { name: "unauthorized_error", - err: infraerrors.Unauthorized("UNAUTHORIZED", "unauthorized"), + err: errors2.Unauthorized("UNAUTHORIZED", "unauthorized"), wantWritten: true, wantHTTPCode: http.StatusUnauthorized, wantBody: Response{ @@ -116,7 +116,7 @@ func TestErrorFrom(t *testing.T) { }, { name: "not_found_error", - err: infraerrors.NotFound("NOT_FOUND", "not found"), + err: errors2.NotFound("NOT_FOUND", "not found"), wantWritten: true, wantHTTPCode: http.StatusNotFound, wantBody: Response{ @@ -127,7 +127,7 @@ func TestErrorFrom(t *testing.T) { }, { name: "conflict_error", - err: infraerrors.Conflict("CONFLICT", "conflict"), + err: errors2.Conflict("CONFLICT", "conflict"), wantWritten: true, wantHTTPCode: http.StatusConflict, wantBody: Response{ @@ -143,7 +143,7 @@ func TestErrorFrom(t *testing.T) { wantHTTPCode: http.StatusInternalServerError, wantBody: Response{ Code: http.StatusInternalServerError, - Message: infraerrors.UnknownMessage, + Message: errors2.UnknownMessage, }, }, } diff --git a/backend/internal/infrastructure/db_pool.go b/backend/internal/repository/db_pool.go similarity index 97% rename from backend/internal/infrastructure/db_pool.go rename to backend/internal/repository/db_pool.go index 612155bf..d7116ab1 100644 --- a/backend/internal/infrastructure/db_pool.go +++ b/backend/internal/repository/db_pool.go @@ -1,4 +1,4 @@ -package infrastructure +package repository import ( "database/sql" diff --git a/backend/internal/infrastructure/db_pool_test.go b/backend/internal/repository/db_pool_test.go similarity index 98% rename from backend/internal/infrastructure/db_pool_test.go rename to backend/internal/repository/db_pool_test.go index 0f0e9716..3868106a 100644 --- a/backend/internal/infrastructure/db_pool_test.go +++ b/backend/internal/repository/db_pool_test.go @@ -1,4 +1,4 @@ -package infrastructure +package repository import ( "database/sql" diff --git a/backend/internal/infrastructure/ent.go b/backend/internal/repository/ent.go similarity index 99% rename from backend/internal/infrastructure/ent.go rename to backend/internal/repository/ent.go index b1ab9a55..d457ba72 100644 --- a/backend/internal/infrastructure/ent.go +++ b/backend/internal/repository/ent.go @@ -1,6 +1,6 @@ // Package infrastructure 提供应用程序的基础设施层组件。 // 包括数据库连接初始化、ORM 客户端管理、Redis 连接、数据库迁移等核心功能。 -package infrastructure +package repository import ( "context" diff --git a/backend/internal/repository/error_translate.go b/backend/internal/repository/error_translate.go index 192f9261..b8065ffe 100644 --- a/backend/internal/repository/error_translate.go +++ b/backend/internal/repository/error_translate.go @@ -7,7 +7,7 @@ import ( "strings" dbent "github.com/Wei-Shaw/sub2api/ent" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/lib/pq" ) diff --git a/backend/internal/repository/integration_harness_test.go b/backend/internal/repository/integration_harness_test.go index 6ef447e1..fb9c26c4 100644 --- a/backend/internal/repository/integration_harness_test.go +++ b/backend/internal/repository/integration_harness_test.go @@ -17,7 +17,6 @@ import ( dbent "github.com/Wei-Shaw/sub2api/ent" _ "github.com/Wei-Shaw/sub2api/ent/runtime" - "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -97,7 +96,7 @@ func TestMain(m *testing.M) { log.Printf("failed to open sql db: %v", err) os.Exit(1) } - if err := infrastructure.ApplyMigrations(ctx, integrationDB); err != nil { + if err := ApplyMigrations(ctx, integrationDB); err != nil { log.Printf("failed to apply db migrations: %v", err) os.Exit(1) } diff --git a/backend/internal/infrastructure/migrations_runner.go b/backend/internal/repository/migrations_runner.go similarity index 99% rename from backend/internal/infrastructure/migrations_runner.go rename to backend/internal/repository/migrations_runner.go index 8477c031..e556b9ce 100644 --- a/backend/internal/infrastructure/migrations_runner.go +++ b/backend/internal/repository/migrations_runner.go @@ -1,4 +1,4 @@ -package infrastructure +package repository import ( "context" diff --git a/backend/internal/repository/migrations_schema_integration_test.go b/backend/internal/repository/migrations_schema_integration_test.go index 49d96445..4c7848b2 100644 --- a/backend/internal/repository/migrations_schema_integration_test.go +++ b/backend/internal/repository/migrations_schema_integration_test.go @@ -7,7 +7,6 @@ import ( "database/sql" "testing" - "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/stretchr/testify/require" ) @@ -15,7 +14,7 @@ func TestMigrationsRunner_IsIdempotent_AndSchemaIsUpToDate(t *testing.T) { tx := testTx(t) // Re-apply migrations to verify idempotency (no errors, no duplicate rows). - require.NoError(t, infrastructure.ApplyMigrations(context.Background(), integrationDB)) + require.NoError(t, ApplyMigrations(context.Background(), integrationDB)) // schema_migrations should have at least the current migration set. var applied int diff --git a/backend/internal/infrastructure/redis.go b/backend/internal/repository/redis.go similarity index 98% rename from backend/internal/infrastructure/redis.go rename to backend/internal/repository/redis.go index 9f4c8770..f3606ad9 100644 --- a/backend/internal/infrastructure/redis.go +++ b/backend/internal/repository/redis.go @@ -1,4 +1,4 @@ -package infrastructure +package repository import ( "time" diff --git a/backend/internal/infrastructure/redis_test.go b/backend/internal/repository/redis_test.go similarity index 97% rename from backend/internal/infrastructure/redis_test.go rename to backend/internal/repository/redis_test.go index 5e38e826..756a63dc 100644 --- a/backend/internal/infrastructure/redis_test.go +++ b/backend/internal/repository/redis_test.go @@ -1,4 +1,4 @@ -package infrastructure +package repository import ( "testing" diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index edeaf782..2de2d1de 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -1,6 +1,11 @@ package repository import ( + "database/sql" + "errors" + + entsql "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/google/wire" @@ -47,4 +52,58 @@ var ProviderSet = wire.NewSet( NewOpenAIOAuthClient, NewGeminiOAuthClient, NewGeminiCliCodeAssistClient, + + ProvideEnt, + ProvideSQLDB, + ProvideRedis, ) + +// ProvideEnt 为依赖注入提供 Ent 客户端。 +// +// 该函数是 InitEnt 的包装器,符合 Wire 的依赖提供函数签名要求。 +// Wire 会在编译时分析依赖关系,自动生成初始化代码。 +// +// 依赖:config.Config +// 提供:*ent.Client +func ProvideEnt(cfg *config.Config) (*ent.Client, error) { + client, _, err := InitEnt(cfg) + return client, err +} + +// ProvideSQLDB 从 Ent 客户端提取底层的 *sql.DB 连接。 +// +// 某些 Repository 需要直接执行原生 SQL(如复杂的批量更新、聚合查询), +// 此时需要访问底层的 sql.DB 而不是通过 Ent ORM。 +// +// 设计说明: +// - Ent 底层使用 sql.DB,通过 Driver 接口可以访问 +// - 这种设计允许在同一事务中混用 Ent 和原生 SQL +// +// 依赖:*ent.Client +// 提供:*sql.DB +func ProvideSQLDB(client *ent.Client) (*sql.DB, error) { + if client == nil { + return nil, errors.New("nil ent client") + } + // 从 Ent 客户端获取底层驱动 + drv, ok := client.Driver().(*entsql.Driver) + if !ok { + return nil, errors.New("ent driver does not expose *sql.DB") + } + // 返回驱动持有的 sql.DB 实例 + return drv.DB(), nil +} + +// ProvideRedis 为依赖注入提供 Redis 客户端。 +// +// Redis 用于: +// - 分布式锁(如并发控制) +// - 缓存(如用户会话、API 响应缓存) +// - 速率限制 +// - 实时统计数据 +// +// 依赖:config.Config +// 提供:*redis.Client +func ProvideRedis(cfg *config.Config) *redis.Client { + return InitRedis(cfg) +} diff --git a/backend/internal/server/middleware/recovery.go b/backend/internal/server/middleware/recovery.go index 04ea6f9d..f05154d3 100644 --- a/backend/internal/server/middleware/recovery.go +++ b/backend/internal/server/middleware/recovery.go @@ -7,7 +7,7 @@ import ( "os" "strings" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/response" "github.com/gin-gonic/gin" ) diff --git a/backend/internal/server/middleware/recovery_test.go b/backend/internal/server/middleware/recovery_test.go index 5edb6da0..439f44cb 100644 --- a/backend/internal/server/middleware/recovery_test.go +++ b/backend/internal/server/middleware/recovery_test.go @@ -8,7 +8,7 @@ import ( "net/http/httptest" "testing" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/response" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index 05895c8b..3c5841bd 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go index facf997e..f22c383a 100644 --- a/backend/internal/service/api_key_service.go +++ b/backend/internal/service/api_key_service.go @@ -8,7 +8,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" ) diff --git a/backend/internal/service/auth_service.go b/backend/internal/service/auth_service.go index 54bbfa5c..69765520 100644 --- a/backend/internal/service/auth_service.go +++ b/backend/internal/service/auth_service.go @@ -8,7 +8,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/golang-jwt/jwt/v5" "golang.org/x/crypto/bcrypt" diff --git a/backend/internal/service/billing_cache_service.go b/backend/internal/service/billing_cache_service.go index 58ed555a..9cdeed7b 100644 --- a/backend/internal/service/billing_cache_service.go +++ b/backend/internal/service/billing_cache_service.go @@ -9,7 +9,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" ) // 错误定义 diff --git a/backend/internal/service/email_service.go b/backend/internal/service/email_service.go index 7b4db611..6537b01e 100644 --- a/backend/internal/service/email_service.go +++ b/backend/internal/service/email_service.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" ) var ( diff --git a/backend/internal/service/group_service.go b/backend/internal/service/group_service.go index 886c0a3a..403636e8 100644 --- a/backend/internal/service/group_service.go +++ b/backend/internal/service/group_service.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/service/proxy_service.go b/backend/internal/service/proxy_service.go index c074b13d..044f9ffc 100644 --- a/backend/internal/service/proxy_service.go +++ b/backend/internal/service/proxy_service.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/service/redeem_service.go b/backend/internal/service/redeem_service.go index 7b0b80f5..b6324235 100644 --- a/backend/internal/service/redeem_service.go +++ b/backend/internal/service/redeem_service.go @@ -10,7 +10,7 @@ import ( "time" dbent "github.com/Wei-Shaw/sub2api/ent" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go index 0ffe991d..b5786ece 100644 --- a/backend/internal/service/setting_service.go +++ b/backend/internal/service/setting_service.go @@ -9,7 +9,7 @@ import ( "strconv" "github.com/Wei-Shaw/sub2api/internal/config" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" ) var ( diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go index f6aefb83..d960c86f 100644 --- a/backend/internal/service/subscription_service.go +++ b/backend/internal/service/subscription_service.go @@ -6,7 +6,7 @@ import ( "log" "time" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/service/turnstile_service.go b/backend/internal/service/turnstile_service.go index cfb87c57..4afcc335 100644 --- a/backend/internal/service/turnstile_service.go +++ b/backend/internal/service/turnstile_service.go @@ -5,7 +5,7 @@ import ( "fmt" "log" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" ) var ( diff --git a/backend/internal/service/usage_service.go b/backend/internal/service/usage_service.go index f653ddfe..e1e97671 100644 --- a/backend/internal/service/usage_service.go +++ b/backend/internal/service/usage_service.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" ) diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go index c17588c6..44a94d32 100644 --- a/backend/internal/service/user_service.go +++ b/backend/internal/service/user_service.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" ) diff --git a/backend/internal/setup/setup.go b/backend/internal/setup/setup.go index 5565ab91..230d016f 100644 --- a/backend/internal/setup/setup.go +++ b/backend/internal/setup/setup.go @@ -11,7 +11,7 @@ import ( "strconv" "time" - "github.com/Wei-Shaw/sub2api/internal/infrastructure" + "github.com/Wei-Shaw/sub2api/internal/repository" "github.com/Wei-Shaw/sub2api/internal/service" _ "github.com/lib/pq" @@ -262,7 +262,7 @@ func initializeDatabase(cfg *SetupConfig) error { migrationCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() - return infrastructure.ApplyMigrations(migrationCtx, db) + return repository.ApplyMigrations(migrationCtx, db) } func createAdminUser(cfg *SetupConfig) error { From 8e55ee0e2ca9c5fd00e7afa5ded757bea43d2667 Mon Sep 17 00:00:00 2001 From: shaw Date: Wed, 31 Dec 2025 23:50:15 +0800 Subject: [PATCH 09/51] style: fix gofmt formatting in claude_types.go --- backend/internal/pkg/antigravity/claude_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index f394d7e3..01b805cd 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -41,7 +41,7 @@ type ClaudeMetadata struct { // 1. 标准格式: { "name": "...", "description": "...", "input_schema": {...} } // 2. Custom 格式 (MCP): { "type": "custom", "name": "...", "custom": { "description": "...", "input_schema": {...} } } type ClaudeTool struct { - Type string `json:"type,omitempty"` // "custom" 或空(标准格式) + Type string `json:"type,omitempty"` // "custom" 或空(标准格式) Name string `json:"name"` Description string `json:"description,omitempty"` // 标准格式使用 InputSchema map[string]any `json:"input_schema,omitempty"` // 标准格式使用 From 7f7bbdf67797510242f41654c151d62fd70eef02 Mon Sep 17 00:00:00 2001 From: song Date: Wed, 31 Dec 2025 21:16:32 +0800 Subject: [PATCH 10/51] =?UTF-8?q?refactor(antigravity):=20=E7=AE=80?= =?UTF-8?q?=E5=8C=96=E6=A8=A1=E5=9E=8B=E6=98=A0=E5=B0=84=E9=80=BB=E8=BE=91?= =?UTF-8?q?=EF=BC=8C=E6=94=AF=E6=8C=81=E5=89=8D=E7=BC=80=E5=8C=B9=E9=85=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 删除精确映射表 antigravityModelMapping,统一使用前缀映射 - 前缀映射支持模型版本号变化(如 -20251111, -thinking, -preview) - 简化 IsModelSupported 函数,所有 claude-/gemini- 前缀模型都支持 - 添加跨协议测试用例:Claude 端点调用 Gemini 模型、Gemini 端点调用 Claude 模型 --- .../internal/integration/e2e_gateway_test.go | 59 ++++++++++++++ .../service/antigravity_gateway_service.go | 76 +++++++++---------- backend/internal/service/gateway_service.go | 20 +---- 3 files changed, 96 insertions(+), 59 deletions(-) diff --git a/backend/internal/integration/e2e_gateway_test.go b/backend/internal/integration/e2e_gateway_test.go index 05cdc85f..ec0b29f7 100644 --- a/backend/internal/integration/e2e_gateway_test.go +++ b/backend/internal/integration/e2e_gateway_test.go @@ -57,6 +57,7 @@ var geminiModels = []string{ "gemini-2.5-flash-lite", "gemini-3-flash", "gemini-3-pro-low", + "gemini-3-pro-high", } func TestMain(m *testing.M) { @@ -641,6 +642,37 @@ func testClaudeThinkingWithToolHistory(t *testing.T, model string) { t.Logf("✅ thinking 模式工具调用测试通过, id=%v", result["id"]) } +// TestClaudeMessagesWithGeminiModel 测试在 Claude 端点使用 Gemini 模型 +// 验证:通过 /v1/messages 端点传入 gemini 模型名的场景(含前缀映射) +// 仅在 Antigravity 模式下运行(ENDPOINT_PREFIX="/antigravity") +func TestClaudeMessagesWithGeminiModel(t *testing.T) { + if endpointPrefix != "/antigravity" { + t.Skip("仅在 Antigravity 模式下运行") + } + + // 测试通过 Claude 端点调用 Gemini 模型 + geminiViaClaude := []string{ + "gemini-3-flash", // 直接支持 + "gemini-3-pro-low", // 直接支持 + "gemini-3-pro-high", // 直接支持 + "gemini-3-pro", // 前缀映射 -> gemini-3-pro-high + "gemini-3-pro-preview", // 前缀映射 -> gemini-3-pro-high + } + + for i, model := range geminiViaClaude { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_通过Claude端点", func(t *testing.T) { + testClaudeMessage(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_通过Claude端点_流式", func(t *testing.T) { + testClaudeMessage(t, model, true) + }) + } +} + // TestClaudeMessagesWithNoSignature 测试历史 thinking block 不带 signature 的场景 // 验证:Gemini 模型接受没有 signature 的 thinking block func TestClaudeMessagesWithNoSignature(t *testing.T) { @@ -738,3 +770,30 @@ func testClaudeWithNoSignature(t *testing.T, model string) { } t.Logf("✅ 无 signature thinking 处理测试通过, id=%v", result["id"]) } + +// TestGeminiEndpointWithClaudeModel 测试通过 Gemini 端点调用 Claude 模型 +// 仅在 Antigravity 模式下运行(ENDPOINT_PREFIX="/antigravity") +func TestGeminiEndpointWithClaudeModel(t *testing.T) { + if endpointPrefix != "/antigravity" { + t.Skip("仅在 Antigravity 模式下运行") + } + + // 测试通过 Gemini 端点调用 Claude 模型 + claudeViaGemini := []string{ + "claude-sonnet-4-5", + "claude-opus-4-5-thinking", + } + + for i, model := range claudeViaGemini { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_通过Gemini端点", func(t *testing.T) { + testGeminiGenerate(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_通过Gemini端点_流式", func(t *testing.T) { + testGeminiGenerate(t, model, true) + }) + } +} diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index ae2976f8..52dbe263 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -25,7 +25,7 @@ const ( antigravityRetryMaxDelay = 16 * time.Second ) -// Antigravity 直接支持的模型 +// Antigravity 直接支持的模型(精确匹配透传) var antigravitySupportedModels = map[string]bool{ "claude-opus-4-5-thinking": true, "claude-sonnet-4-5": true, @@ -36,23 +36,26 @@ var antigravitySupportedModels = map[string]bool{ "gemini-3-flash": true, "gemini-3-pro-low": true, "gemini-3-pro-high": true, - "gemini-3-pro-preview": true, "gemini-3-pro-image": true, } -// Antigravity 系统默认模型映射表(不支持 → 支持) -var antigravityModelMapping = map[string]string{ - "claude-3-5-sonnet-20241022": "claude-sonnet-4-5", - "claude-3-5-sonnet-20240620": "claude-sonnet-4-5", - "claude-sonnet-4-5-20250929": "claude-sonnet-4-5-thinking", - "claude-opus-4": "claude-opus-4-5-thinking", - "claude-opus-4-5-20251101": "claude-opus-4-5-thinking", - "claude-haiku-4": "gemini-3-flash", - "claude-haiku-4-5": "gemini-3-flash", - "claude-3-haiku-20240307": "gemini-3-flash", - "claude-haiku-4-5-20251001": "gemini-3-flash", - // 生图模型:官方名 → Antigravity 内部名 - "gemini-3-pro-image-preview": "gemini-3-pro-image", +// Antigravity 前缀映射表(按前缀长度降序排列,确保最长匹配优先) +// 用于处理模型版本号变化(如 -20251111, -thinking, -preview 等后缀) +var antigravityPrefixMapping = []struct { + prefix string + target string +}{ + // 长前缀优先 + {"gemini-3-pro-image", "gemini-3-pro-image"}, // gemini-3-pro-image-preview 等 + {"claude-3-5-sonnet", "claude-sonnet-4-5"}, // 旧版 claude-3-5-sonnet-xxx + {"claude-sonnet-4-5", "claude-sonnet-4-5"}, // claude-sonnet-4-5-xxx + {"claude-haiku-4-5", "gemini-3-flash"}, // claude-haiku-4-5-xxx + {"claude-opus-4-5", "claude-opus-4-5-thinking"}, + {"claude-3-haiku", "gemini-3-flash"}, // 旧版 claude-3-haiku-xxx + {"claude-sonnet-4", "claude-sonnet-4-5"}, + {"claude-haiku-4", "gemini-3-flash"}, + {"claude-opus-4", "claude-opus-4-5-thinking"}, + {"gemini-3-pro", "gemini-3-pro-high"}, // gemini-3-pro, gemini-3-pro-preview 等 } // AntigravityGatewayService 处理 Antigravity 平台的 API 转发 @@ -84,24 +87,27 @@ func (s *AntigravityGatewayService) GetTokenProvider() *AntigravityTokenProvider } // getMappedModel 获取映射后的模型名 +// 逻辑:账户映射 → 直接支持透传 → 前缀映射 → gemini透传 → 默认值 func (s *AntigravityGatewayService) getMappedModel(account *Account, requestedModel string) string { - // 1. 优先使用账户级映射(复用现有方法) + // 1. 账户级映射(用户自定义优先) if mapped := account.GetMappedModel(requestedModel); mapped != requestedModel { return mapped } - // 2. 系统默认映射 - if mapped, ok := antigravityModelMapping[requestedModel]; ok { - return mapped - } - - // 3. Gemini 模型透传 - if strings.HasPrefix(requestedModel, "gemini-") { + // 2. 直接支持的模型透传 + if antigravitySupportedModels[requestedModel] { return requestedModel } - // 4. Claude 前缀透传直接支持的模型 - if antigravitySupportedModels[requestedModel] { + // 3. 前缀映射(处理版本号变化,如 -20251111, -thinking, -preview) + for _, pm := range antigravityPrefixMapping { + if strings.HasPrefix(requestedModel, pm.prefix) { + return pm.target + } + } + + // 4. Gemini 模型透传(未匹配到前缀的 gemini 模型) + if strings.HasPrefix(requestedModel, "gemini-") { return requestedModel } @@ -110,24 +116,10 @@ func (s *AntigravityGatewayService) getMappedModel(account *Account, requestedMo } // IsModelSupported 检查模型是否被支持 +// 所有 claude- 和 gemini- 前缀的模型都能通过映射或透传支持 func (s *AntigravityGatewayService) IsModelSupported(requestedModel string) bool { - // 直接支持的模型 - if antigravitySupportedModels[requestedModel] { - return true - } - // 可映射的模型 - if _, ok := antigravityModelMapping[requestedModel]; ok { - return true - } - // Gemini 前缀透传 - if strings.HasPrefix(requestedModel, "gemini-") { - return true - } - // Claude 模型支持(通过默认映射) - if strings.HasPrefix(requestedModel, "claude-") { - return true - } - return false + return strings.HasPrefix(requestedModel, "claude-") || + strings.HasPrefix(requestedModel, "gemini-") } // TestConnectionResult 测试连接结果 diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index d542e9c2..9874751d 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -515,24 +515,10 @@ func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedMo } // IsAntigravityModelSupported 检查 Antigravity 平台是否支持指定模型 +// 所有 claude- 和 gemini- 前缀的模型都能通过映射或透传支持 func IsAntigravityModelSupported(requestedModel string) bool { - // 直接支持的模型 - if antigravitySupportedModels[requestedModel] { - return true - } - // 可映射的模型 - if _, ok := antigravityModelMapping[requestedModel]; ok { - return true - } - // Gemini 前缀透传 - if strings.HasPrefix(requestedModel, "gemini-") { - return true - } - // Claude 模型支持(通过默认映射到 claude-sonnet-4-5) - if strings.HasPrefix(requestedModel, "claude-") { - return true - } - return false + return strings.HasPrefix(requestedModel, "claude-") || + strings.HasPrefix(requestedModel, "gemini-") } // GetAccessToken 获取账号凭证 From 85485f1702d25c4d34c7a65533990b447ccb97ee Mon Sep 17 00:00:00 2001 From: song Date: Thu, 1 Jan 2026 01:59:25 +0800 Subject: [PATCH 11/51] style: fix gofmt formatting --- backend/internal/service/antigravity_gateway_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 52dbe263..e9225bff 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -51,7 +51,7 @@ var antigravityPrefixMapping = []struct { {"claude-sonnet-4-5", "claude-sonnet-4-5"}, // claude-sonnet-4-5-xxx {"claude-haiku-4-5", "gemini-3-flash"}, // claude-haiku-4-5-xxx {"claude-opus-4-5", "claude-opus-4-5-thinking"}, - {"claude-3-haiku", "gemini-3-flash"}, // 旧版 claude-3-haiku-xxx + {"claude-3-haiku", "gemini-3-flash"}, // 旧版 claude-3-haiku-xxx {"claude-sonnet-4", "claude-sonnet-4-5"}, {"claude-haiku-4", "gemini-3-flash"}, {"claude-opus-4", "claude-opus-4-5-thinking"}, From edee46e47f98d9e70c43fc4cddb4f203b31f1568 Mon Sep 17 00:00:00 2001 From: song Date: Thu, 1 Jan 2026 02:07:41 +0800 Subject: [PATCH 12/51] =?UTF-8?q?test:=20=E6=9B=B4=E6=96=B0=20model=20mapp?= =?UTF-8?q?ing=20=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=E6=9C=9F=E6=9C=9B?= =?UTF-8?q?=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/service/antigravity_model_mapping_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/internal/service/antigravity_model_mapping_test.go b/backend/internal/service/antigravity_model_mapping_test.go index b3631dfc..1e37cdc2 100644 --- a/backend/internal/service/antigravity_model_mapping_test.go +++ b/backend/internal/service/antigravity_model_mapping_test.go @@ -131,7 +131,7 @@ func TestAntigravityGatewayService_GetMappedModel(t *testing.T) { name: "系统映射 - claude-sonnet-4-5-20250929", requestedModel: "claude-sonnet-4-5-20250929", accountMapping: nil, - expected: "claude-sonnet-4-5-thinking", + expected: "claude-sonnet-4-5", }, // 3. Gemini 透传 From 592d2d097875a94f02978a14aa88bdfea0aa6c91 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 04:01:51 +0800 Subject: [PATCH 13/51] =?UTF-8?q?feat(gateway):=20=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E8=B4=9F=E8=BD=BD=E6=84=9F=E7=9F=A5=E7=9A=84=E8=B4=A6=E5=8F=B7?= =?UTF-8?q?=E8=B0=83=E5=BA=A6=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增调度配置:粘性会话排队、兜底排队、负载计算、槽位清理 - 实现账号级等待队列和批量负载查询(Redis Lua 脚本) - 三层选择策略:粘性会话优先 → 负载感知选择 → 兜底排队 - 后台定期清理过期槽位,防止资源泄漏 - 集成到所有网关处理器(Claude/Gemini/OpenAI) --- backend/cmd/server/wire_gen.go | 6 +- backend/internal/config/config.go | 42 ++ backend/internal/config/config_test.go | 49 ++- backend/internal/handler/gateway_handler.go | 108 ++++- backend/internal/handler/gateway_helper.go | 22 +- .../internal/handler/gemini_v1beta_handler.go | 51 ++- .../handler/openai_gateway_handler.go | 49 ++- .../internal/repository/concurrency_cache.go | 185 ++++++++- .../concurrency_cache_benchmark_test.go | 2 +- .../concurrency_cache_integration_test.go | 44 +- backend/internal/repository/wire.go | 9 +- .../internal/service/concurrency_service.go | 110 +++++ .../service/gateway_multiplatform_test.go | 54 +++ backend/internal/service/gateway_service.go | 387 +++++++++++++++++- .../service/openai_gateway_service.go | 260 ++++++++++++ backend/internal/service/wire.go | 11 +- 16 files changed, 1342 insertions(+), 47 deletions(-) diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index c4859383..e3498680 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -100,7 +100,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream) accountTestService := service.NewAccountTestService(accountRepository, oAuthService, openAIOAuthService, geminiTokenProvider, antigravityGatewayService, httpUpstream) concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) - concurrencyService := service.NewConcurrencyService(concurrencyCache) + concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService) accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService) oAuthHandler := admin.NewOAuthHandler(oAuthService) @@ -128,10 +128,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { identityService := service.NewIdentityService(identityCache) timingWheelService := service.ProvideTimingWheelService() deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) - gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService) + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService) geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService) gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService) - openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) + openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index aeeddcb4..8c154a9d 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -3,6 +3,7 @@ package config import ( "fmt" "strings" + "time" "github.com/spf13/viper" ) @@ -119,6 +120,26 @@ type GatewayConfig struct { // ConcurrencySlotTTLMinutes: 并发槽位过期时间(分钟) // 应大于最长 LLM 请求时间,防止请求完成前槽位过期 ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"` + + // Scheduling: 账号调度相关配置 + Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"` +} + +// GatewaySchedulingConfig accounts scheduling configuration. +type GatewaySchedulingConfig struct { + // 粘性会话排队配置 + StickySessionMaxWaiting int `mapstructure:"sticky_session_max_waiting"` + StickySessionWaitTimeout time.Duration `mapstructure:"sticky_session_wait_timeout"` + + // 兜底排队配置 + FallbackWaitTimeout time.Duration `mapstructure:"fallback_wait_timeout"` + FallbackMaxWaiting int `mapstructure:"fallback_max_waiting"` + + // 负载计算 + LoadBatchEnabled bool `mapstructure:"load_batch_enabled"` + + // 过期槽位清理周期(0 表示禁用) + SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"` } func (s *ServerConfig) Address() string { @@ -323,6 +344,12 @@ func setDefaults() { viper.SetDefault("gateway.max_upstream_clients", 5000) viper.SetDefault("gateway.client_idle_ttl_seconds", 900) viper.SetDefault("gateway.concurrency_slot_ttl_minutes", 15) // 并发槽位过期时间(支持超长请求) + viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3) + viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) + viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) + viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100) + viper.SetDefault("gateway.scheduling.load_batch_enabled", true) + viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second) // TokenRefresh viper.SetDefault("token_refresh.enabled", true) @@ -411,6 +438,21 @@ func (c *Config) Validate() error { if c.Gateway.ConcurrencySlotTTLMinutes <= 0 { return fmt.Errorf("gateway.concurrency_slot_ttl_minutes must be positive") } + if c.Gateway.Scheduling.StickySessionMaxWaiting <= 0 { + return fmt.Errorf("gateway.scheduling.sticky_session_max_waiting must be positive") + } + if c.Gateway.Scheduling.StickySessionWaitTimeout <= 0 { + return fmt.Errorf("gateway.scheduling.sticky_session_wait_timeout must be positive") + } + if c.Gateway.Scheduling.FallbackWaitTimeout <= 0 { + return fmt.Errorf("gateway.scheduling.fallback_wait_timeout must be positive") + } + if c.Gateway.Scheduling.FallbackMaxWaiting <= 0 { + return fmt.Errorf("gateway.scheduling.fallback_max_waiting must be positive") + } + if c.Gateway.Scheduling.SlotCleanupInterval < 0 { + return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative") + } return nil } diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index 1f1becb8..6e722a54 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -1,6 +1,11 @@ package config -import "testing" +import ( + "testing" + "time" + + "github.com/spf13/viper" +) func TestNormalizeRunMode(t *testing.T) { tests := []struct { @@ -21,3 +26,45 @@ func TestNormalizeRunMode(t *testing.T) { } } } + +func TestLoadDefaultSchedulingConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 { + t.Fatalf("StickySessionMaxWaiting = %d, want 3", cfg.Gateway.Scheduling.StickySessionMaxWaiting) + } + if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 45*time.Second { + t.Fatalf("StickySessionWaitTimeout = %v, want 45s", cfg.Gateway.Scheduling.StickySessionWaitTimeout) + } + if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second { + t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout) + } + if cfg.Gateway.Scheduling.FallbackMaxWaiting != 100 { + t.Fatalf("FallbackMaxWaiting = %d, want 100", cfg.Gateway.Scheduling.FallbackMaxWaiting) + } + if !cfg.Gateway.Scheduling.LoadBatchEnabled { + t.Fatalf("LoadBatchEnabled = false, want true") + } + if cfg.Gateway.Scheduling.SlotCleanupInterval != 30*time.Second { + t.Fatalf("SlotCleanupInterval = %v, want 30s", cfg.Gateway.Scheduling.SlotCleanupInterval) + } +} + +func TestLoadSchedulingConfigFromEnv(t *testing.T) { + viper.Reset() + t.Setenv("GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING", "5") + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 5 { + t.Fatalf("StickySessionMaxWaiting = %d, want 5", cfg.Gateway.Scheduling.StickySessionMaxWaiting) + } +} diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index a2f833ff..769e6700 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -141,6 +141,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } else if apiKey.Group != nil { platform = apiKey.Group.Platform } + sessionKey := sessionHash + if platform == service.PlatformGemini && sessionHash != "" { + sessionKey = "gemini:" + sessionHash + } if platform == service.PlatformGemini { const maxAccountSwitches = 3 @@ -149,7 +153,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { lastFailoverStatus := 0 for { - account, err := h.geminiCompatService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -158,9 +162,13 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } + account := selection.Account // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } if reqStream { sendMockWarmupStream(c, reqModel) } else { @@ -170,11 +178,44 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } // 3. 获取账号并发槽位 - accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) - if err != nil { - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return + accountReleaseFunc := selection.ReleaseFunc + var accountWaitRelease func() + if !selection.Acquired { + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) + return + } + accountWaitRelease = func() { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + + accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + &streamStarted, + ) + if err != nil { + if accountWaitRelease != nil { + accountWaitRelease() + } + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } } // 转发请求 - 根据账号平台分流 @@ -187,6 +228,9 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } + if accountWaitRelease != nil { + accountWaitRelease() + } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { @@ -231,7 +275,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { for { // 选择支持该模型的账号 - account, err := h.gatewayService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -240,9 +284,13 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } + account := selection.Account // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { + if selection.Acquired && selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } if reqStream { sendMockWarmupStream(c, reqModel) } else { @@ -252,11 +300,44 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } // 3. 获取账号并发槽位 - accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) - if err != nil { - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return + accountReleaseFunc := selection.ReleaseFunc + var accountWaitRelease func() + if !selection.Acquired { + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) + return + } + accountWaitRelease = func() { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + + accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + &streamStarted, + ) + if err != nil { + if accountWaitRelease != nil { + accountWaitRelease() + } + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } } // 转发请求 - 根据账号平台分流 @@ -269,6 +350,9 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } + if accountWaitRelease != nil { + accountWaitRelease() + } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/handler/gateway_helper.go b/backend/internal/handler/gateway_helper.go index 4c7bd0f0..4e049dbb 100644 --- a/backend/internal/handler/gateway_helper.go +++ b/backend/internal/handler/gateway_helper.go @@ -83,6 +83,16 @@ func (h *ConcurrencyHelper) DecrementWaitCount(ctx context.Context, userID int64 h.concurrencyService.DecrementWaitCount(ctx, userID) } +// IncrementAccountWaitCount increments the wait count for an account +func (h *ConcurrencyHelper) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + return h.concurrencyService.IncrementAccountWaitCount(ctx, accountID, maxWait) +} + +// DecrementAccountWaitCount decrements the wait count for an account +func (h *ConcurrencyHelper) DecrementAccountWaitCount(ctx context.Context, accountID int64) { + h.concurrencyService.DecrementAccountWaitCount(ctx, accountID) +} + // AcquireUserSlotWithWait acquires a user concurrency slot, waiting if necessary. // For streaming requests, sends ping events during the wait. // streamStarted is updated if streaming response has begun. @@ -126,7 +136,12 @@ func (h *ConcurrencyHelper) AcquireAccountSlotWithWait(c *gin.Context, accountID // waitForSlotWithPing waits for a concurrency slot, sending ping events for streaming requests. // streamStarted pointer is updated when streaming begins (for proper error handling by caller). func (h *ConcurrencyHelper) waitForSlotWithPing(c *gin.Context, slotType string, id int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) { - ctx, cancel := context.WithTimeout(c.Request.Context(), maxConcurrencyWait) + return h.waitForSlotWithPingTimeout(c, slotType, id, maxConcurrency, maxConcurrencyWait, isStream, streamStarted) +} + +// waitForSlotWithPingTimeout waits for a concurrency slot with a custom timeout. +func (h *ConcurrencyHelper) waitForSlotWithPingTimeout(c *gin.Context, slotType string, id int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) { + ctx, cancel := context.WithTimeout(c.Request.Context(), timeout) defer cancel() // Determine if ping is needed (streaming + ping format defined) @@ -200,6 +215,11 @@ func (h *ConcurrencyHelper) waitForSlotWithPing(c *gin.Context, slotType string, } } +// AcquireAccountSlotWithWaitTimeout acquires an account slot with a custom timeout (keeps SSE ping). +func (h *ConcurrencyHelper) AcquireAccountSlotWithWaitTimeout(c *gin.Context, accountID int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) { + return h.waitForSlotWithPingTimeout(c, "account", accountID, maxConcurrency, timeout, isStream, streamStarted) +} + // nextBackoff 计算下一次退避时间 // 性能优化:使用指数退避 + 随机抖动,避免惊群效应 // current: 当前退避时间 diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 4e99e00d..1959c0f3 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -197,13 +197,17 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 3) select account (sticky session based on request body) parsedReq, _ := service.ParseGatewayRequest(body) sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) + sessionKey := sessionHash + if sessionHash != "" { + sessionKey = "gemini:" + sessionHash + } const maxAccountSwitches = 3 switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 for { - account, err := h.geminiCompatService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, modelName, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) @@ -212,12 +216,46 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { handleGeminiFailoverExhausted(c, lastFailoverStatus) return } + account := selection.Account // 4) account concurrency slot - accountReleaseFunc, err := geminiConcurrency.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, stream, &streamStarted) - if err != nil { - googleError(c, http.StatusTooManyRequests, err.Error()) - return + accountReleaseFunc := selection.ReleaseFunc + var accountWaitRelease func() + if !selection.Acquired { + if selection.WaitPlan == nil { + googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts") + return + } + canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") + return + } + accountWaitRelease = func() { + geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + + accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + stream, + &streamStarted, + ) + if err != nil { + if accountWaitRelease != nil { + accountWaitRelease() + } + googleError(c, http.StatusTooManyRequests, err.Error()) + return + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } } // 5) forward (根据平台分流) @@ -230,6 +268,9 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } + if accountWaitRelease != nil { + accountWaitRelease() + } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 7c9934c6..c6b969bc 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -146,7 +146,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { for { // Select account supporting the requested model log.Printf("[OpenAI Handler] Selecting account: groupID=%v model=%s", apiKey.GroupID, reqModel) - account, err := h.gatewayService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) if err != nil { log.Printf("[OpenAI Handler] SelectAccount failed: %v", err) if len(failedAccountIDs) == 0 { @@ -156,14 +156,48 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } + account := selection.Account log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name) // 3. Acquire account concurrency slot - accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) - if err != nil { - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return + accountReleaseFunc := selection.ReleaseFunc + var accountWaitRelease func() + if !selection.Acquired { + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) + return + } + accountWaitRelease = func() { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + + accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + &streamStarted, + ) + if err != nil { + if accountWaitRelease != nil { + accountWaitRelease() + } + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionHash, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } } // Forward request @@ -171,6 +205,9 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } + if accountWaitRelease != nil { + accountWaitRelease() + } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/repository/concurrency_cache.go b/backend/internal/repository/concurrency_cache.go index 9205230b..d8d6989b 100644 --- a/backend/internal/repository/concurrency_cache.go +++ b/backend/internal/repository/concurrency_cache.go @@ -2,7 +2,9 @@ package repository import ( "context" + "errors" "fmt" + "strconv" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/redis/go-redis/v9" @@ -27,6 +29,8 @@ const ( userSlotKeyPrefix = "concurrency:user:" // 等待队列计数器格式: concurrency:wait:{userID} waitQueueKeyPrefix = "concurrency:wait:" + // 账号级等待队列计数器格式: wait:account:{accountID} + accountWaitKeyPrefix = "wait:account:" // 默认槽位过期时间(分钟),可通过配置覆盖 defaultSlotTTLMinutes = 15 @@ -112,33 +116,112 @@ var ( redis.call('EXPIRE', KEYS[1], ARGV[2]) end - return 1 - `) + return 1 + `) + + // incrementAccountWaitScript - account-level wait queue count + incrementAccountWaitScript = redis.NewScript(` + local current = redis.call('GET', KEYS[1]) + if current == false then + current = 0 + else + current = tonumber(current) + end + + if current >= tonumber(ARGV[1]) then + return 0 + end + + local newVal = redis.call('INCR', KEYS[1]) + + -- Only set TTL on first creation to avoid refreshing zombie data + if newVal == 1 then + redis.call('EXPIRE', KEYS[1], ARGV[2]) + end + + return 1 + `) // decrementWaitScript - same as before decrementWaitScript = redis.NewScript(` - local current = redis.call('GET', KEYS[1]) - if current ~= false and tonumber(current) > 0 then - redis.call('DECR', KEYS[1]) - end - return 1 - `) + local current = redis.call('GET', KEYS[1]) + if current ~= false and tonumber(current) > 0 then + redis.call('DECR', KEYS[1]) + end + return 1 + `) + + // getAccountsLoadBatchScript - batch load query (read-only) + // ARGV[1] = slot TTL (seconds, retained for compatibility) + // ARGV[2..n] = accountID1, maxConcurrency1, accountID2, maxConcurrency2, ... + getAccountsLoadBatchScript = redis.NewScript(` + local result = {} + + local i = 2 + while i <= #ARGV do + local accountID = ARGV[i] + local maxConcurrency = tonumber(ARGV[i + 1]) + + local slotKey = 'concurrency:account:' .. accountID + local currentConcurrency = redis.call('ZCARD', slotKey) + + local waitKey = 'wait:account:' .. accountID + local waitingCount = redis.call('GET', waitKey) + if waitingCount == false then + waitingCount = 0 + else + waitingCount = tonumber(waitingCount) + end + + local loadRate = 0 + if maxConcurrency > 0 then + loadRate = math.floor((currentConcurrency + waitingCount) * 100 / maxConcurrency) + end + + table.insert(result, accountID) + table.insert(result, currentConcurrency) + table.insert(result, waitingCount) + table.insert(result, loadRate) + + i = i + 2 + end + + return result + `) + + // cleanupExpiredSlotsScript - remove expired slots + // KEYS[1] = concurrency:account:{accountID} + // ARGV[1] = TTL (seconds) + cleanupExpiredSlotsScript = redis.NewScript(` + local key = KEYS[1] + local ttl = tonumber(ARGV[1]) + local timeResult = redis.call('TIME') + local now = tonumber(timeResult[1]) + local expireBefore = now - ttl + return redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore) + `) ) type concurrencyCache struct { - rdb *redis.Client - slotTTLSeconds int // 槽位过期时间(秒) + rdb *redis.Client + slotTTLSeconds int // 槽位过期时间(秒) + waitQueueTTLSeconds int // 等待队列过期时间(秒) } // NewConcurrencyCache 创建并发控制缓存 // slotTTLMinutes: 槽位过期时间(分钟),0 或负数使用默认值 15 分钟 -func NewConcurrencyCache(rdb *redis.Client, slotTTLMinutes int) service.ConcurrencyCache { +// waitQueueTTLSeconds: 等待队列过期时间(秒),0 或负数使用 slot TTL +func NewConcurrencyCache(rdb *redis.Client, slotTTLMinutes int, waitQueueTTLSeconds int) service.ConcurrencyCache { if slotTTLMinutes <= 0 { slotTTLMinutes = defaultSlotTTLMinutes } + if waitQueueTTLSeconds <= 0 { + waitQueueTTLSeconds = slotTTLMinutes * 60 + } return &concurrencyCache{ - rdb: rdb, - slotTTLSeconds: slotTTLMinutes * 60, + rdb: rdb, + slotTTLSeconds: slotTTLMinutes * 60, + waitQueueTTLSeconds: waitQueueTTLSeconds, } } @@ -155,6 +238,10 @@ func waitQueueKey(userID int64) string { return fmt.Sprintf("%s%d", waitQueueKeyPrefix, userID) } +func accountWaitKey(accountID int64) string { + return fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) +} + // Account slot operations func (c *concurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { @@ -225,3 +312,75 @@ func (c *concurrencyCache) DecrementWaitCount(ctx context.Context, userID int64) _, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result() return err } + +// Account wait queue operations + +func (c *concurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + key := accountWaitKey(accountID) + result, err := incrementAccountWaitScript.Run(ctx, c.rdb, []string{key}, maxWait, c.waitQueueTTLSeconds).Int() + if err != nil { + return false, err + } + return result == 1, nil +} + +func (c *concurrencyCache) DecrementAccountWaitCount(ctx context.Context, accountID int64) error { + key := accountWaitKey(accountID) + _, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result() + return err +} + +func (c *concurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + key := accountWaitKey(accountID) + val, err := c.rdb.Get(ctx, key).Int() + if err != nil && !errors.Is(err, redis.Nil) { + return 0, err + } + if errors.Is(err, redis.Nil) { + return 0, nil + } + return val, nil +} + +func (c *concurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []service.AccountWithConcurrency) (map[int64]*service.AccountLoadInfo, error) { + if len(accounts) == 0 { + return map[int64]*service.AccountLoadInfo{}, nil + } + + args := []interface{}{c.slotTTLSeconds} + for _, acc := range accounts { + args = append(args, acc.ID, acc.MaxConcurrency) + } + + result, err := getAccountsLoadBatchScript.Run(ctx, c.rdb, []string{}, args...).Slice() + if err != nil { + return nil, err + } + + loadMap := make(map[int64]*service.AccountLoadInfo) + for i := 0; i < len(result); i += 4 { + if i+3 >= len(result) { + break + } + + accountID, _ := strconv.ParseInt(fmt.Sprintf("%v", result[i]), 10, 64) + currentConcurrency, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+1])) + waitingCount, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+2])) + loadRate, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+3])) + + loadMap[accountID] = &service.AccountLoadInfo{ + AccountID: accountID, + CurrentConcurrency: currentConcurrency, + WaitingCount: waitingCount, + LoadRate: loadRate, + } + } + + return loadMap, nil +} + +func (c *concurrencyCache) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { + key := accountSlotKey(accountID) + _, err := cleanupExpiredSlotsScript.Run(ctx, c.rdb, []string{key}, c.slotTTLSeconds).Result() + return err +} diff --git a/backend/internal/repository/concurrency_cache_benchmark_test.go b/backend/internal/repository/concurrency_cache_benchmark_test.go index cafab9cb..25697ab1 100644 --- a/backend/internal/repository/concurrency_cache_benchmark_test.go +++ b/backend/internal/repository/concurrency_cache_benchmark_test.go @@ -22,7 +22,7 @@ func BenchmarkAccountConcurrency(b *testing.B) { _ = rdb.Close() }() - cache, _ := NewConcurrencyCache(rdb, benchSlotTTLMinutes).(*concurrencyCache) + cache, _ := NewConcurrencyCache(rdb, benchSlotTTLMinutes, int(benchSlotTTL.Seconds())).(*concurrencyCache) ctx := context.Background() for _, size := range []int{10, 100, 1000} { diff --git a/backend/internal/repository/concurrency_cache_integration_test.go b/backend/internal/repository/concurrency_cache_integration_test.go index 6a7c83f4..f3d70ef1 100644 --- a/backend/internal/repository/concurrency_cache_integration_test.go +++ b/backend/internal/repository/concurrency_cache_integration_test.go @@ -27,7 +27,7 @@ type ConcurrencyCacheSuite struct { func (s *ConcurrencyCacheSuite) SetupTest() { s.IntegrationRedisSuite.SetupTest() - s.cache = NewConcurrencyCache(s.rdb, testSlotTTLMinutes) + s.cache = NewConcurrencyCache(s.rdb, testSlotTTLMinutes, int(testSlotTTL.Seconds())) } func (s *ConcurrencyCacheSuite) TestAccountSlot_AcquireAndRelease() { @@ -218,6 +218,48 @@ func (s *ConcurrencyCacheSuite) TestWaitQueue_DecrementNoNegative() { require.GreaterOrEqual(s.T(), val, 0, "expected non-negative wait count") } +func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_IncrementAndDecrement() { + accountID := int64(30) + waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) + + ok, err := s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 1") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 2") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 3") + require.False(s.T(), ok, "expected account wait increment over max to fail") + + ttl, err := s.rdb.TTL(s.ctx, waitKey).Result() + require.NoError(s.T(), err, "TTL account waitKey") + s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) + + require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount") + + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.Equal(s.T(), 1, val, "expected account wait count 1") +} + +func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_DecrementNoNegative() { + accountID := int64(301) + waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) + + require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount on non-existent key") + + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.GreaterOrEqual(s.T(), val, 0, "expected non-negative account wait count after decrement on empty") +} + func (s *ConcurrencyCacheSuite) TestGetAccountConcurrency_Missing() { // When no slots exist, GetAccountConcurrency should return 0 cur, err := s.cache.GetAccountConcurrency(s.ctx, 999) diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index edeaf782..f1a8d4cf 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -10,7 +10,14 @@ import ( // ProvideConcurrencyCache 创建并发控制缓存,从配置读取 TTL 参数 // 性能优化:TTL 可配置,支持长时间运行的 LLM 请求场景 func ProvideConcurrencyCache(rdb *redis.Client, cfg *config.Config) service.ConcurrencyCache { - return NewConcurrencyCache(rdb, cfg.Gateway.ConcurrencySlotTTLMinutes) + waitTTLSeconds := int(cfg.Gateway.Scheduling.StickySessionWaitTimeout.Seconds()) + if cfg.Gateway.Scheduling.FallbackWaitTimeout > cfg.Gateway.Scheduling.StickySessionWaitTimeout { + waitTTLSeconds = int(cfg.Gateway.Scheduling.FallbackWaitTimeout.Seconds()) + } + if waitTTLSeconds <= 0 { + waitTTLSeconds = cfg.Gateway.ConcurrencySlotTTLMinutes * 60 + } + return NewConcurrencyCache(rdb, cfg.Gateway.ConcurrencySlotTTLMinutes, waitTTLSeconds) } // ProviderSet is the Wire provider set for all repositories diff --git a/backend/internal/service/concurrency_service.go b/backend/internal/service/concurrency_service.go index b5229491..65ef16db 100644 --- a/backend/internal/service/concurrency_service.go +++ b/backend/internal/service/concurrency_service.go @@ -18,6 +18,11 @@ type ConcurrencyCache interface { ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) + // 账号等待队列(账号级) + IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) + DecrementAccountWaitCount(ctx context.Context, accountID int64) error + GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) + // 用户槽位管理 // 键格式: concurrency:user:{userID}(有序集合,成员为 requestID) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) @@ -27,6 +32,12 @@ type ConcurrencyCache interface { // 等待队列计数(只在首次创建时设置 TTL) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) DecrementWaitCount(ctx context.Context, userID int64) error + + // 批量负载查询(只读) + GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) + + // 清理过期槽位(后台任务) + CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error } // generateRequestID generates a unique request ID for concurrency slot tracking @@ -61,6 +72,18 @@ type AcquireResult struct { ReleaseFunc func() // Must be called when done (typically via defer) } +type AccountWithConcurrency struct { + ID int64 + MaxConcurrency int +} + +type AccountLoadInfo struct { + AccountID int64 + CurrentConcurrency int + WaitingCount int + LoadRate int // 0-100+ (percent) +} + // AcquireAccountSlot attempts to acquire a concurrency slot for an account. // If the account is at max concurrency, it waits until a slot is available or timeout. // Returns a release function that MUST be called when the request completes. @@ -177,6 +200,42 @@ func (s *ConcurrencyService) DecrementWaitCount(ctx context.Context, userID int6 } } +// IncrementAccountWaitCount increments the wait queue counter for an account. +func (s *ConcurrencyService) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + if s.cache == nil { + return true, nil + } + + result, err := s.cache.IncrementAccountWaitCount(ctx, accountID, maxWait) + if err != nil { + log.Printf("Warning: increment wait count failed for account %d: %v", accountID, err) + return true, nil + } + return result, nil +} + +// DecrementAccountWaitCount decrements the wait queue counter for an account. +func (s *ConcurrencyService) DecrementAccountWaitCount(ctx context.Context, accountID int64) { + if s.cache == nil { + return + } + + bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := s.cache.DecrementAccountWaitCount(bgCtx, accountID); err != nil { + log.Printf("Warning: decrement wait count failed for account %d: %v", accountID, err) + } +} + +// GetAccountWaitingCount gets current wait queue count for an account. +func (s *ConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if s.cache == nil { + return 0, nil + } + return s.cache.GetAccountWaitingCount(ctx, accountID) +} + // CalculateMaxWait calculates the maximum wait queue size for a user // maxWait = userConcurrency + defaultExtraWaitSlots func CalculateMaxWait(userConcurrency int) int { @@ -186,6 +245,57 @@ func CalculateMaxWait(userConcurrency int) int { return userConcurrency + defaultExtraWaitSlots } +// GetAccountsLoadBatch returns load info for multiple accounts. +func (s *ConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + if s.cache == nil { + return map[int64]*AccountLoadInfo{}, nil + } + return s.cache.GetAccountsLoadBatch(ctx, accounts) +} + +// CleanupExpiredAccountSlots removes expired slots for one account (background task). +func (s *ConcurrencyService) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { + if s.cache == nil { + return nil + } + return s.cache.CleanupExpiredAccountSlots(ctx, accountID) +} + +// StartSlotCleanupWorker starts a background cleanup worker for expired account slots. +func (s *ConcurrencyService) StartSlotCleanupWorker(accountRepo AccountRepository, interval time.Duration) { + if s == nil || s.cache == nil || accountRepo == nil || interval <= 0 { + return + } + + runCleanup := func() { + listCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + accounts, err := accountRepo.ListSchedulable(listCtx) + cancel() + if err != nil { + log.Printf("Warning: list schedulable accounts failed: %v", err) + return + } + for _, account := range accounts { + accountCtx, accountCancel := context.WithTimeout(context.Background(), 2*time.Second) + err := s.cache.CleanupExpiredAccountSlots(accountCtx, account.ID) + accountCancel() + if err != nil { + log.Printf("Warning: cleanup expired slots failed for account %d: %v", account.ID, err) + } + } + } + + go func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + runCleanup() + for range ticker.C { + runCleanup() + } + }() +} + // GetAccountConcurrencyBatch gets current concurrency counts for multiple accounts // Returns a map of accountID -> current concurrency count func (s *ConcurrencyService) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index d779bcfa..e1b61632 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -261,6 +261,34 @@ func TestGatewayService_SelectAccountForModelWithPlatform_PriorityAndLastUsed(t require.Equal(t, int64(2), acc.ID, "同优先级应选择最久未用的账户") } +func TestGatewayService_SelectAccountForModelWithPlatform_GeminiOAuthPreference(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeApiKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "同优先级且未使用时应优先选择OAuth账户") +} + // TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts 测试无可用账户 func TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts(t *testing.T) { ctx := context.Background() @@ -576,6 +604,32 @@ func TestGatewayService_isModelSupportedByAccount(t *testing.T) { func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { ctx := context.Background() + t.Run("混合调度-Gemini优先选择OAuth账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeApiKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "同优先级且未使用时应优先选择OAuth账户") + }) + t.Run("混合调度-包含启用mixed_scheduling的antigravity账户", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{ diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index d542e9c2..6c45ff0f 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -13,6 +13,7 @@ import ( "log" "net/http" "regexp" + "sort" "strings" "time" @@ -66,6 +67,20 @@ type GatewayCache interface { RefreshSessionTTL(ctx context.Context, sessionHash string, ttl time.Duration) error } +type AccountWaitPlan struct { + AccountID int64 + MaxConcurrency int + Timeout time.Duration + MaxWaiting int +} + +type AccountSelectionResult struct { + Account *Account + Acquired bool + ReleaseFunc func() + WaitPlan *AccountWaitPlan // nil means no wait allowed +} + // ClaudeUsage 表示Claude API返回的usage信息 type ClaudeUsage struct { InputTokens int `json:"input_tokens"` @@ -108,6 +123,7 @@ type GatewayService struct { identityService *IdentityService httpUpstream HTTPUpstream deferredService *DeferredService + concurrencyService *ConcurrencyService } // NewGatewayService creates a new GatewayService @@ -119,6 +135,7 @@ func NewGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, + concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, billingCacheService *BillingCacheService, @@ -134,6 +151,7 @@ func NewGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, + concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, billingCacheService: billingCacheService, @@ -183,6 +201,14 @@ func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string { return "" } +// BindStickySession sets session -> account binding with standard TTL. +func (s *GatewayService) BindStickySession(ctx context.Context, sessionHash string, accountID int64) error { + if sessionHash == "" || accountID <= 0 { + return nil + } + return s.cache.SetSessionAccountID(ctx, sessionHash, accountID, stickySessionTTL) +} + func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string { if parsed == nil { return "" @@ -332,8 +358,360 @@ func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context return s.selectAccountForModelWithPlatform(ctx, groupID, sessionHash, requestedModel, excludedIDs, platform) } +// SelectAccountWithLoadAwareness selects account with load-awareness and wait plan. +func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { + cfg := s.schedulingConfig() + var stickyAccountID int64 + if sessionHash != "" && s.cache != nil { + if accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash); err == nil { + stickyAccountID = accountID + } + } + if s.concurrencyService == nil || !cfg.LoadBatchEnabled { + account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) + if err != nil { + return nil, err + } + result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err == nil && result.Acquired { + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, nil + } + + platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID) + if err != nil { + return nil, err + } + preferOAuth := platform == PlatformGemini + + accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + if err != nil { + return nil, err + } + if len(accounts) == 0 { + return nil, errors.New("no available accounts") + } + + isExcluded := func(accountID int64) bool { + if excludedIDs == nil { + return false + } + _, excluded := excludedIDs[accountID] + return excluded + } + + // ============ Layer 1: 粘性会话优先 ============ + if sessionHash != "" { + accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash) + if err == nil && accountID > 0 && !isExcluded(accountID) { + account, err := s.accountRepo.GetByID(ctx, accountID) + if err == nil && s.isAccountAllowedForPlatform(account, platform, useMixed) && + account.IsSchedulable() && + (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if err == nil && result.Acquired { + _ = s.cache.RefreshSessionTTL(ctx, sessionHash, stickySessionTTL) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + } + } + + // ============ Layer 2: 负载感知选择 ============ + candidates := make([]*Account, 0, len(accounts)) + for i := range accounts { + acc := &accounts[i] + if isExcluded(acc.ID) { + continue + } + if !s.isAccountAllowedForPlatform(acc, platform, useMixed) { + continue + } + if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { + continue + } + candidates = append(candidates, acc) + } + + if len(candidates) == 0 { + return nil, errors.New("no available accounts") + } + + accountLoads := make([]AccountWithConcurrency, 0, len(candidates)) + for _, acc := range candidates { + accountLoads = append(accountLoads, AccountWithConcurrency{ + ID: acc.ID, + MaxConcurrency: acc.Concurrency, + }) + } + + loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) + if err != nil { + if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, sessionHash, preferOAuth); ok { + return result, nil + } + } else { + type accountWithLoad struct { + account *Account + loadInfo *AccountLoadInfo + } + var available []accountWithLoad + for _, acc := range candidates { + loadInfo := loadMap[acc.ID] + if loadInfo == nil { + loadInfo = &AccountLoadInfo{AccountID: acc.ID} + } + if loadInfo.LoadRate < 100 { + available = append(available, accountWithLoad{ + account: acc, + loadInfo: loadInfo, + }) + } + } + + if len(available) > 0 { + sort.SliceStable(available, func(i, j int) bool { + a, b := available[i], available[j] + if a.account.Priority != b.account.Priority { + return a.account.Priority < b.account.Priority + } + if a.loadInfo.LoadRate != b.loadInfo.LoadRate { + return a.loadInfo.LoadRate < b.loadInfo.LoadRate + } + switch { + case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil: + return true + case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil: + return false + case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil: + if preferOAuth && a.account.Type != b.account.Type { + return a.account.Type == AccountTypeOAuth + } + return false + default: + return a.account.LastUsedAt.Before(*b.account.LastUsedAt) + } + }) + + for _, item := range available { + result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) + if err == nil && result.Acquired { + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, sessionHash, item.account.ID, stickySessionTTL) + } + return &AccountSelectionResult{ + Account: item.account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } + } + } + + // ============ Layer 3: 兜底排队 ============ + sortAccountsByPriorityAndLastUsed(candidates, preferOAuth) + for _, acc := range candidates { + return &AccountSelectionResult{ + Account: acc, + WaitPlan: &AccountWaitPlan{ + AccountID: acc.ID, + MaxConcurrency: acc.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, nil + } + return nil, errors.New("no available accounts") +} + +func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, sessionHash string, preferOAuth bool) (*AccountSelectionResult, bool) { + ordered := append([]*Account(nil), candidates...) + sortAccountsByPriorityAndLastUsed(ordered, preferOAuth) + + for _, acc := range ordered { + result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) + if err == nil && result.Acquired { + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, sessionHash, acc.ID, stickySessionTTL) + } + return &AccountSelectionResult{ + Account: acc, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, true + } + } + + return nil, false +} + +func (s *GatewayService) schedulingConfig() config.GatewaySchedulingConfig { + if s.cfg != nil { + return s.cfg.Gateway.Scheduling + } + return config.GatewaySchedulingConfig{ + StickySessionMaxWaiting: 3, + StickySessionWaitTimeout: 45 * time.Second, + FallbackWaitTimeout: 30 * time.Second, + FallbackMaxWaiting: 100, + LoadBatchEnabled: true, + SlotCleanupInterval: 30 * time.Second, + } +} + +func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64) (string, bool, error) { + forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) + if hasForcePlatform && forcePlatform != "" { + return forcePlatform, true, nil + } + if groupID != nil { + group, err := s.groupRepo.GetByID(ctx, *groupID) + if err != nil { + return "", false, fmt.Errorf("get group failed: %w", err) + } + return group.Platform, false, nil + } + return PlatformAnthropic, false, nil +} + +func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { + useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform + if useMixed { + platforms := []string{platform, PlatformAntigravity} + var accounts []Account + var err error + if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, platforms) + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) + } + if err != nil { + return nil, useMixed, err + } + filtered := make([]Account, 0, len(accounts)) + for _, acc := range accounts { + if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { + continue + } + filtered = append(filtered, acc) + } + return filtered, useMixed, nil + } + + var accounts []Account + var err error + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) + } else if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, platform) + if err == nil && len(accounts) == 0 && hasForcePlatform { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) + } + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) + } + if err != nil { + return nil, useMixed, err + } + return accounts, useMixed, nil +} + +func (s *GatewayService) isAccountAllowedForPlatform(account *Account, platform string, useMixed bool) bool { + if account == nil { + return false + } + if useMixed { + if account.Platform == platform { + return true + } + return account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() + } + return account.Platform == platform +} + +func (s *GatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) { + if s.concurrencyService == nil { + return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil + } + return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) +} + +func sortAccountsByPriority(accounts []*Account) { + sort.SliceStable(accounts, func(i, j int) bool { + return accounts[i].Priority < accounts[j].Priority + }) +} + +func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { + sort.SliceStable(accounts, func(i, j int) bool { + a, b := accounts[i], accounts[j] + if a.Priority != b.Priority { + return a.Priority < b.Priority + } + switch { + case a.LastUsedAt == nil && b.LastUsedAt != nil: + return true + case a.LastUsedAt != nil && b.LastUsedAt == nil: + return false + case a.LastUsedAt == nil && b.LastUsedAt == nil: + if preferOAuth && a.Type != b.Type { + return a.Type == AccountTypeOAuth + } + return false + default: + return a.LastUsedAt.Before(*b.LastUsedAt) + } + }) +} + // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { + preferOAuth := platform == PlatformGemini // 1. 查询粘性会话 if sessionHash != "" { accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash) @@ -389,7 +767,9 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, case acc.LastUsedAt != nil && selected.LastUsedAt == nil: // keep selected (never used is preferred) case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - // keep selected (both never used) + if preferOAuth && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { + selected = acc + } default: if acc.LastUsedAt.Before(*selected.LastUsedAt) { selected = acc @@ -419,6 +799,7 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, // 查询原生平台账户 + 启用 mixed_scheduling 的 antigravity 账户 func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, nativePlatform string) (*Account, error) { platforms := []string{nativePlatform, PlatformAntigravity} + preferOAuth := nativePlatform == PlatformGemini // 1. 查询粘性会话 if sessionHash != "" { @@ -478,7 +859,9 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g case acc.LastUsedAt != nil && selected.LastUsedAt == nil: // keep selected (never used is preferred) case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - // keep selected (both never used) + if preferOAuth && acc.Platform == PlatformGemini && selected.Platform == PlatformGemini && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { + selected = acc + } default: if acc.LastUsedAt.Before(*selected.LastUsedAt) { selected = acc diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 84e98679..f8eb29bd 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -13,6 +13,7 @@ import ( "log" "net/http" "regexp" + "sort" "strconv" "strings" "time" @@ -80,6 +81,7 @@ type OpenAIGatewayService struct { userSubRepo UserSubscriptionRepository cache GatewayCache cfg *config.Config + concurrencyService *ConcurrencyService billingService *BillingService rateLimitService *RateLimitService billingCacheService *BillingCacheService @@ -95,6 +97,7 @@ func NewOpenAIGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, + concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, billingCacheService *BillingCacheService, @@ -108,6 +111,7 @@ func NewOpenAIGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, + concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, billingCacheService: billingCacheService, @@ -126,6 +130,14 @@ func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string { return hex.EncodeToString(hash[:]) } +// BindStickySession sets session -> account binding with standard TTL. +func (s *OpenAIGatewayService) BindStickySession(ctx context.Context, sessionHash string, accountID int64) error { + if sessionHash == "" || accountID <= 0 { + return nil + } + return s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, accountID, openaiStickySessionTTL) +} + // SelectAccount selects an OpenAI account with sticky session support func (s *OpenAIGatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) { return s.SelectAccountForModel(ctx, groupID, sessionHash, "") @@ -218,6 +230,254 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C return selected, nil } +// SelectAccountWithLoadAwareness selects an account with load-awareness and wait plan. +func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { + cfg := s.schedulingConfig() + var stickyAccountID int64 + if sessionHash != "" && s.cache != nil { + if accountID, err := s.cache.GetSessionAccountID(ctx, "openai:"+sessionHash); err == nil { + stickyAccountID = accountID + } + } + if s.concurrencyService == nil || !cfg.LoadBatchEnabled { + account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) + if err != nil { + return nil, err + } + result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err == nil && result.Acquired { + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: account.ID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, nil + } + + accounts, err := s.listSchedulableAccounts(ctx, groupID) + if err != nil { + return nil, err + } + if len(accounts) == 0 { + return nil, errors.New("no available accounts") + } + + isExcluded := func(accountID int64) bool { + if excludedIDs == nil { + return false + } + _, excluded := excludedIDs[accountID] + return excluded + } + + // ============ Layer 1: Sticky session ============ + if sessionHash != "" { + accountID, err := s.cache.GetSessionAccountID(ctx, "openai:"+sessionHash) + if err == nil && accountID > 0 && !isExcluded(accountID) { + account, err := s.accountRepo.GetByID(ctx, accountID) + if err == nil && account.IsSchedulable() && account.IsOpenAI() && + (requestedModel == "" || account.IsModelSupported(requestedModel)) { + result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if err == nil && result.Acquired { + _ = s.cache.RefreshSessionTTL(ctx, "openai:"+sessionHash, openaiStickySessionTTL) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + } + } + + // ============ Layer 2: Load-aware selection ============ + candidates := make([]*Account, 0, len(accounts)) + for i := range accounts { + acc := &accounts[i] + if isExcluded(acc.ID) { + continue + } + if requestedModel != "" && !acc.IsModelSupported(requestedModel) { + continue + } + candidates = append(candidates, acc) + } + + if len(candidates) == 0 { + return nil, errors.New("no available accounts") + } + + accountLoads := make([]AccountWithConcurrency, 0, len(candidates)) + for _, acc := range candidates { + accountLoads = append(accountLoads, AccountWithConcurrency{ + ID: acc.ID, + MaxConcurrency: acc.Concurrency, + }) + } + + loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) + if err != nil { + ordered := append([]*Account(nil), candidates...) + sortAccountsByPriorityAndLastUsed(ordered, false) + for _, acc := range ordered { + result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) + if err == nil && result.Acquired { + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, acc.ID, openaiStickySessionTTL) + } + return &AccountSelectionResult{ + Account: acc, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } + } else { + type accountWithLoad struct { + account *Account + loadInfo *AccountLoadInfo + } + var available []accountWithLoad + for _, acc := range candidates { + loadInfo := loadMap[acc.ID] + if loadInfo == nil { + loadInfo = &AccountLoadInfo{AccountID: acc.ID} + } + if loadInfo.LoadRate < 100 { + available = append(available, accountWithLoad{ + account: acc, + loadInfo: loadInfo, + }) + } + } + + if len(available) > 0 { + sort.SliceStable(available, func(i, j int) bool { + a, b := available[i], available[j] + if a.account.Priority != b.account.Priority { + return a.account.Priority < b.account.Priority + } + if a.loadInfo.LoadRate != b.loadInfo.LoadRate { + return a.loadInfo.LoadRate < b.loadInfo.LoadRate + } + switch { + case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil: + return true + case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil: + return false + case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil: + return false + default: + return a.account.LastUsedAt.Before(*b.account.LastUsedAt) + } + }) + + for _, item := range available { + result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) + if err == nil && result.Acquired { + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, item.account.ID, openaiStickySessionTTL) + } + return &AccountSelectionResult{ + Account: item.account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } + } + } + + // ============ Layer 3: Fallback wait ============ + sortAccountsByPriorityAndLastUsed(candidates, false) + for _, acc := range candidates { + return &AccountSelectionResult{ + Account: acc, + WaitPlan: &AccountWaitPlan{ + AccountID: acc.ID, + MaxConcurrency: acc.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, nil + } + + return nil, errors.New("no available accounts") +} + +func (s *OpenAIGatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64) ([]Account, error) { + var accounts []Account + var err error + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) + } else if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformOpenAI) + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) + } + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + return accounts, nil +} + +func (s *OpenAIGatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) { + if s.concurrencyService == nil { + return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil + } + return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) +} + +func (s *OpenAIGatewayService) schedulingConfig() config.GatewaySchedulingConfig { + if s.cfg != nil { + return s.cfg.Gateway.Scheduling + } + return config.GatewaySchedulingConfig{ + StickySessionMaxWaiting: 3, + StickySessionWaitTimeout: 45 * time.Second, + FallbackWaitTimeout: 30 * time.Second, + FallbackMaxWaiting: 100, + LoadBatchEnabled: true, + SlotCleanupInterval: 30 * time.Second, + } +} + // GetAccessToken gets the access token for an OpenAI account func (s *OpenAIGatewayService) GetAccessToken(ctx context.Context, account *Account) (string, string, error) { switch account.Type { diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 81e01d47..a202ccf2 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -73,6 +73,15 @@ func ProvideDeferredService(accountRepo AccountRepository, timingWheel *TimingWh return svc } +// ProvideConcurrencyService creates ConcurrencyService and starts slot cleanup worker. +func ProvideConcurrencyService(cache ConcurrencyCache, accountRepo AccountRepository, cfg *config.Config) *ConcurrencyService { + svc := NewConcurrencyService(cache) + if cfg != nil { + svc.StartSlotCleanupWorker(accountRepo, cfg.Gateway.Scheduling.SlotCleanupInterval) + } + return svc +} + // ProviderSet is the Wire provider set for all services var ProviderSet = wire.NewSet( // Core services @@ -107,7 +116,7 @@ var ProviderSet = wire.NewSet( ProvideEmailQueueService, NewTurnstileService, NewSubscriptionService, - NewConcurrencyService, + ProvideConcurrencyService, NewIdentityService, NewCRSSyncService, ProvideUpdateService, From fe31495a893e276e3192b3762ecbf3e4079cd4cf Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 04:15:31 +0800 Subject: [PATCH 14/51] =?UTF-8?q?test(gateway):=20=E8=A1=A5=E5=85=85?= =?UTF-8?q?=E8=B4=A6=E5=8F=B7=E8=B0=83=E5=BA=A6=E4=BC=98=E5=8C=96=E7=9A=84?= =?UTF-8?q?=E5=8D=95=E5=85=83=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 添加 GetAccountsLoadBatch 批量负载查询测试 - 添加 CleanupExpiredAccountSlots 过期槽位清理测试 - 添加 SelectAccountWithLoadAwareness 负载感知选择测试 - 测试覆盖降级行为、账号排除、错误处理等场景 --- .../concurrency_cache_integration_test.go | 132 +++++++++++++++ .../service/gateway_multiplatform_test.go | 157 ++++++++++++++++++ 2 files changed, 289 insertions(+) diff --git a/backend/internal/repository/concurrency_cache_integration_test.go b/backend/internal/repository/concurrency_cache_integration_test.go index f3d70ef1..707cbdab 100644 --- a/backend/internal/repository/concurrency_cache_integration_test.go +++ b/backend/internal/repository/concurrency_cache_integration_test.go @@ -274,6 +274,138 @@ func (s *ConcurrencyCacheSuite) TestGetUserConcurrency_Missing() { require.Equal(s.T(), 0, cur) } +func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch() { + // Setup: Create accounts with different load states + account1 := int64(100) + account2 := int64(101) + account3 := int64(102) + + // Account 1: 2/3 slots used, 1 waiting + ok, err := s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req1") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req2") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, account1, 5) + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Account 2: 1/2 slots used, 0 waiting + ok, err = s.cache.AcquireAccountSlot(s.ctx, account2, 2, "req3") + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Account 3: 0/1 slots used, 0 waiting (idle) + + // Query batch load + accounts := []service.AccountWithConcurrency{ + {ID: account1, MaxConcurrency: 3}, + {ID: account2, MaxConcurrency: 2}, + {ID: account3, MaxConcurrency: 1}, + } + + loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, accounts) + require.NoError(s.T(), err) + require.Len(s.T(), loadMap, 3) + + // Verify account1: (2 + 1) / 3 = 100% + load1 := loadMap[account1] + require.NotNil(s.T(), load1) + require.Equal(s.T(), account1, load1.AccountID) + require.Equal(s.T(), 2, load1.CurrentConcurrency) + require.Equal(s.T(), 1, load1.WaitingCount) + require.Equal(s.T(), 100, load1.LoadRate) + + // Verify account2: (1 + 0) / 2 = 50% + load2 := loadMap[account2] + require.NotNil(s.T(), load2) + require.Equal(s.T(), account2, load2.AccountID) + require.Equal(s.T(), 1, load2.CurrentConcurrency) + require.Equal(s.T(), 0, load2.WaitingCount) + require.Equal(s.T(), 50, load2.LoadRate) + + // Verify account3: (0 + 0) / 1 = 0% + load3 := loadMap[account3] + require.NotNil(s.T(), load3) + require.Equal(s.T(), account3, load3.AccountID) + require.Equal(s.T(), 0, load3.CurrentConcurrency) + require.Equal(s.T(), 0, load3.WaitingCount) + require.Equal(s.T(), 0, load3.LoadRate) +} + +func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch_Empty() { + // Test with empty account list + loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, []service.AccountWithConcurrency{}) + require.NoError(s.T(), err) + require.Empty(s.T(), loadMap) +} + +func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots() { + accountID := int64(200) + slotKey := fmt.Sprintf("%s%d", accountSlotKeyPrefix, accountID) + + // Acquire 3 slots + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req3") + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Verify 3 slots exist + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 3, cur) + + // Manually set old timestamps for req1 and req2 (simulate expired slots) + now := time.Now().Unix() + expiredTime := now - int64(testSlotTTL.Seconds()) - 10 // 10 seconds past TTL + err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req1"}).Err() + require.NoError(s.T(), err) + err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req2"}).Err() + require.NoError(s.T(), err) + + // Run cleanup + err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID) + require.NoError(s.T(), err) + + // Verify only 1 slot remains (req3) + cur, err = s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 1, cur) + + // Verify req3 still exists + members, err := s.rdb.ZRange(s.ctx, slotKey, 0, -1).Result() + require.NoError(s.T(), err) + require.Len(s.T(), members, 1) + require.Equal(s.T(), "req3", members[0]) +} + +func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots_NoExpired() { + accountID := int64(201) + + // Acquire 2 fresh slots + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2") + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Run cleanup (should not remove anything) + err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID) + require.NoError(s.T(), err) + + // Verify both slots still exist + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 2, cur) +} + func TestConcurrencyCacheSuite(t *testing.T) { suite.Run(t, new(ConcurrencyCacheSuite)) } diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index e1b61632..560c7767 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -837,3 +837,160 @@ func TestAccount_IsMixedSchedulingEnabled(t *testing.T) { }) } } + +// mockConcurrencyService for testing +type mockConcurrencyService struct { + accountLoads map[int64]*AccountLoadInfo + accountWaitCounts map[int64]int + acquireResults map[int64]bool +} + +func (m *mockConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + if m.accountLoads == nil { + return map[int64]*AccountLoadInfo{}, nil + } + result := make(map[int64]*AccountLoadInfo) + for _, acc := range accounts { + if load, ok := m.accountLoads[acc.ID]; ok { + result[acc.ID] = load + } else { + result[acc.ID] = &AccountLoadInfo{ + AccountID: acc.ID, + CurrentConcurrency: 0, + WaitingCount: 0, + LoadRate: 0, + } + } + } + return result, nil +} + +func (m *mockConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if m.accountWaitCounts == nil { + return 0, nil + } + return m.accountWaitCounts[accountID], nil +} + +// TestGatewayService_SelectAccountWithLoadAwareness tests load-aware account selection +func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { + ctx := context.Background() + + t.Run("禁用负载批量查询-降级到传统选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, // No concurrency service + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID, "应选择优先级最高的账号") + }) + + t.Run("无ConcurrencyService-降级到传统选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应选择优先级最高的账号") + }) + + t.Run("排除账号-不选择被排除的账号", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + excludedIDs := map[int64]struct{}{1: {}} + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "不应选择被排除的账号") + }) + + t.Run("无可用账号-返回错误", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{}, + accountsByID: map[int64]*Account{}, + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "no available accounts") + }) +} From 34c102045ac46de7b28d77530bc0e7eca120af95 Mon Sep 17 00:00:00 2001 From: IanShaw <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 04:21:18 +0800 Subject: [PATCH 15/51] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20/v1/messages?= =?UTF-8?q?=20=E9=97=B4=E6=AD=87=E6=80=A7=20400=20=E9=94=99=E8=AF=AF=20(#1?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(upstream): 修复上游格式兼容性问题 - 跳过Claude模型无signature的thinking block - 支持custom类型工具(MCP)格式转换 - 添加ClaudeCustomToolSpec结构体支持MCP工具 - 添加Custom字段验证,跳过无效custom工具 - 在convertClaudeToolsToGeminiTools中添加schema清理 - 完整的单元测试覆盖,包含边界情况 修复: Issue 0.1 signature缺失, Issue 0.2 custom工具格式 改进: Codex审查发现的2个重要问题 测试: - TestBuildParts_ThinkingBlockWithoutSignature: 验证thinking block处理 - TestBuildTools_CustomTypeTools: 验证custom工具转换和边界情况 - TestConvertClaudeToolsToGeminiTools_CustomType: 验证service层转换 * feat(gemini): 添加Gemini限额与TierID支持 实现PR1:Gemini限额与TierID功能 后端修改: - GeminiTokenInfo结构体添加TierID字段 - fetchProjectID函数返回(projectID, tierID, error) - 从LoadCodeAssist响应中提取tierID(优先IsDefault,回退到第一个非空tier) - ExchangeCode、RefreshAccountToken、GetAccessToken函数更新以处理tierID - BuildAccountCredentials函数保存tier_id到credentials 前端修改: - AccountStatusIndicator组件添加tier显示 - 支持LEGACY/PRO/ULTRA等tier类型的友好显示 - 使用蓝色badge展示tier信息 技术细节: - tierID提取逻辑:优先选择IsDefault的tier,否则选择第一个非空tier - 所有fetchProjectID调用点已更新以处理新的返回签名 - 前端gracefully处理missing/unknown tier_id * refactor(gemini): 优化TierID实现并添加安全验证 根据并发代码审查(code-reviewer, security-auditor, gemini, codex)的反馈进行改进: 安全改进: - 添加validateTierID函数验证tier_id格式和长度(最大64字符) - 限制tier_id字符集为字母数字、下划线、连字符和斜杠 - 在BuildAccountCredentials中验证tier_id后再存储 - 静默跳过无效tier_id,不阻塞账户创建 代码质量改进: - 提取extractTierIDFromAllowedTiers辅助函数消除重复代码 - 重构fetchProjectID函数,tierID提取逻辑只执行一次 - 改进代码可读性和可维护性 审查工具: - code-reviewer agent (a09848e) - security-auditor agent (a9a149c) - gemini CLI (bcc7c81) - codex (b5d8919) 修复问题: - HIGH: 未验证的tier_id输入 - MEDIUM: 代码重复(tierID提取逻辑重复2次) * fix(format): 修复 gofmt 格式问题 - 修复 claude_types.go 中的字段对齐问题 - 修复 gemini_messages_compat_service.go 中的缩进问题 * fix(upstream): 修复上游格式兼容性问题 (#14) * fix(upstream): 修复上游格式兼容性问题 - 跳过Claude模型无signature的thinking block - 支持custom类型工具(MCP)格式转换 - 添加ClaudeCustomToolSpec结构体支持MCP工具 - 添加Custom字段验证,跳过无效custom工具 - 在convertClaudeToolsToGeminiTools中添加schema清理 - 完整的单元测试覆盖,包含边界情况 修复: Issue 0.1 signature缺失, Issue 0.2 custom工具格式 改进: Codex审查发现的2个重要问题 测试: - TestBuildParts_ThinkingBlockWithoutSignature: 验证thinking block处理 - TestBuildTools_CustomTypeTools: 验证custom工具转换和边界情况 - TestConvertClaudeToolsToGeminiTools_CustomType: 验证service层转换 * fix(format): 修复 gofmt 格式问题 - 修复 claude_types.go 中的字段对齐问题 - 修复 gemini_messages_compat_service.go 中的缩进问题 * fix(format): 修复 claude_types.go 的 gofmt 格式问题 * feat(antigravity): 优化 thinking block 和 schema 处理 - 为 dummy thinking block 添加 ThoughtSignature - 重构 thinking block 处理逻辑,在每个条件分支内创建 part - 优化 excludedSchemaKeys,移除 Gemini 实际支持的字段 (minItems, maxItems, minimum, maximum, additionalProperties, format) - 添加详细注释说明 Gemini API 支持的 schema 字段 * fix(antigravity): 增强 schema 清理的安全性 基于 Codex review 建议: - 添加 format 字段白名单过滤,只保留 Gemini 支持的 date-time/date/time - 补充更多不支持的 schema 关键字到黑名单: * 组合 schema: oneOf, anyOf, allOf, not, if/then/else * 对象验证: minProperties, maxProperties, patternProperties 等 * 定义引用: $defs, definitions - 避免不支持的 schema 字段导致 Gemini API 校验失败 * fix(lint): 修复 gemini_messages_compat_service 空分支警告 - 在 cleanToolSchema 的 if 语句中添加 continue - 移除重复的注释 * fix(antigravity): 移除 minItems/maxItems 以兼容 Claude API - 将 minItems 和 maxItems 添加到 schema 黑名单 - Claude API (Vertex AI) 不支持这些数组验证字段 - 添加调试日志记录工具 schema 转换过程 - 修复 tools.14.custom.input_schema 验证错误 * fix(antigravity): 修复 additionalProperties schema 对象问题 - 将 additionalProperties 的 schema 对象转换为布尔值 true - Claude API 只支持 additionalProperties: false,不支持 schema 对象 - 修复 tools.14.custom.input_schema 验证错误 - 参考 Claude 官方文档的 JSON Schema 限制 * fix(antigravity): 修复 Claude 模型 thinking 块兼容性问题 - 完全跳过 Claude 模型的 thinking 块以避免 signature 验证失败 - 只在 Gemini 模型中使用 dummy thought signature - 修改 additionalProperties 默认值为 false(更安全) - 添加调试日志以便排查问题 * fix(upstream): 修复跨模型切换时的 dummy signature 问题 基于 Codex review 和用户场景分析的修复: 1. 问题场景 - Gemini (thinking) → Claude (thinking) 切换时 - Gemini 返回的 thinking 块使用 dummy signature - Claude API 会拒绝 dummy signature,导致 400 错误 2. 修复内容 - request_transformer.go:262: 跳过 dummy signature - 只保留真实的 Claude signature - 支持频繁的跨模型切换 3. 其他修复(基于 Codex review) - gateway_service.go:691: 修复 io.ReadAll 错误处理 - gateway_service.go:687: 条件日志(尊重 LogUpstreamErrorBody 配置) - gateway_service.go:915: 收紧 400 failover 启发式 - request_transformer.go:188: 移除签名成功日志 4. 新增功能(默认关闭) - 阶段 1: 上游错误日志(GATEWAY_LOG_UPSTREAM_ERROR_BODY) - 阶段 2: Antigravity thinking 修复 - 阶段 3: API-key beta 注入(GATEWAY_INJECT_BETA_FOR_APIKEY) - 阶段 3: 智能 400 failover(GATEWAY_FAILOVER_ON_400) 测试:所有测试通过 * fix(lint): 修复 golangci-lint 问题 - 应用 De Morgan 定律简化条件判断 - 修复 gofmt 格式问题 - 移除未使用的 min 函数 --- backend/internal/config/config.go | 15 ++ .../internal/pkg/antigravity/claude_types.go | 3 + .../pkg/antigravity/request_transformer.go | 223 +++++++++++++----- .../antigravity/request_transformer_test.go | 179 ++++++++++++++ backend/internal/pkg/claude/constants.go | 6 + .../service/antigravity_gateway_service.go | 9 + backend/internal/service/gateway_service.go | 138 +++++++++++ .../service/gemini_messages_compat_service.go | 39 ++- .../gemini_messages_compat_service_test.go | 128 ++++++++++ .../internal/service/gemini_oauth_service.go | 104 +++++--- .../internal/service/gemini_token_provider.go | 5 +- deploy/config.example.yaml | 15 ++ frontend/package-lock.json | 10 + .../account/AccountStatusIndicator.vue | 27 +++ 14 files changed, 815 insertions(+), 86 deletions(-) create mode 100644 backend/internal/pkg/antigravity/request_transformer_test.go create mode 100644 backend/internal/service/gemini_messages_compat_service_test.go diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index aeeddcb4..d3674932 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -119,6 +119,17 @@ type GatewayConfig struct { // ConcurrencySlotTTLMinutes: 并发槽位过期时间(分钟) // 应大于最长 LLM 请求时间,防止请求完成前槽位过期 ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"` + + // 是否记录上游错误响应体摘要(避免输出请求内容) + LogUpstreamErrorBody bool `mapstructure:"log_upstream_error_body"` + // 上游错误响应体记录最大字节数(超过会截断) + LogUpstreamErrorBodyMaxBytes int `mapstructure:"log_upstream_error_body_max_bytes"` + + // API-key 账号在客户端未提供 anthropic-beta 时,是否按需自动补齐(默认关闭以保持兼容) + InjectBetaForApiKey bool `mapstructure:"inject_beta_for_apikey"` + + // 是否允许对部分 400 错误触发 failover(默认关闭以避免改变语义) + FailoverOn400 bool `mapstructure:"failover_on_400"` } func (s *ServerConfig) Address() string { @@ -313,6 +324,10 @@ func setDefaults() { // Gateway viper.SetDefault("gateway.response_header_timeout", 300) // 300秒(5分钟)等待上游响应头,LLM高负载时可能排队较久 + viper.SetDefault("gateway.log_upstream_error_body", false) + viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048) + viper.SetDefault("gateway.inject_beta_for_apikey", false) + viper.SetDefault("gateway.failover_on_400", false) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index 01b805cd..34e6b1f4 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -54,6 +54,9 @@ type CustomToolSpec struct { InputSchema map[string]any `json:"input_schema"` } +// ClaudeCustomToolSpec 兼容旧命名(MCP custom 工具规格) +type ClaudeCustomToolSpec = CustomToolSpec + // SystemBlock system prompt 数组形式的元素 type SystemBlock struct { Type string `json:"type"` diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index e0b5b886..83b87a32 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -14,13 +14,16 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st // 用于存储 tool_use id -> name 映射 toolIDToName := make(map[string]string) - // 检测是否启用 thinking - isThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" - // 只有 Gemini 模型支持 dummy thought workaround // Claude 模型通过 Vertex/Google API 需要有效的 thought signatures allowDummyThought := strings.HasPrefix(mappedModel, "gemini-") + // 检测是否启用 thinking + requestedThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" + // 为避免 Claude 模型的 thought signature/消息块约束导致 400(上游要求 thinking 块开头等), + // 非 Gemini 模型默认不启用 thinking(除非未来支持完整签名链路)。 + isThinkingEnabled := requestedThinkingEnabled && allowDummyThought + // 1. 构建 contents contents, err := buildContents(claudeReq.Messages, toolIDToName, isThinkingEnabled, allowDummyThought) if err != nil { @@ -31,7 +34,15 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model) // 3. 构建 generationConfig - generationConfig := buildGenerationConfig(claudeReq) + reqForGen := claudeReq + if requestedThinkingEnabled && !allowDummyThought { + log.Printf("[Warning] Disabling thinking for non-Gemini model in antigravity transform: model=%s", mappedModel) + // shallow copy to avoid mutating caller's request + clone := *claudeReq + clone.Thinking = nil + reqForGen = &clone + } + generationConfig := buildGenerationConfig(reqForGen) // 4. 构建 tools tools := buildTools(claudeReq.Tools) @@ -148,8 +159,9 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT if !hasThoughtPart && len(parts) > 0 { // 在开头添加 dummy thinking block parts = append([]GeminiPart{{ - Text: "Thinking...", - Thought: true, + Text: "Thinking...", + Thought: true, + ThoughtSignature: dummyThoughtSignature, }}, parts...) } } @@ -171,6 +183,34 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT // 参考: https://ai.google.dev/gemini-api/docs/thought-signatures const dummyThoughtSignature = "skip_thought_signature_validator" +// isValidThoughtSignature 验证 thought signature 是否有效 +// Claude API 要求 signature 必须是 base64 编码的字符串,长度至少 32 字节 +func isValidThoughtSignature(signature string) bool { + // 空字符串无效 + if signature == "" { + return false + } + + // signature 应该是 base64 编码,长度至少 40 个字符(约 30 字节) + // 参考 Claude API 文档和实际观察到的有效 signature + if len(signature) < 40 { + log.Printf("[Debug] Signature too short: len=%d", len(signature)) + return false + } + + // 检查是否是有效的 base64 字符 + // base64 字符集: A-Z, a-z, 0-9, +, /, = + for i, c := range signature { + if (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && + (c < '0' || c > '9') && c != '+' && c != '/' && c != '=' { + log.Printf("[Debug] Invalid base64 character at position %d: %c (code=%d)", i, c, c) + return false + } + } + + return true +} + // buildParts 构建消息的 parts // allowDummyThought: 只有 Gemini 模型支持 dummy thought signature func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDummyThought bool) ([]GeminiPart, error) { @@ -199,22 +239,30 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu } case "thinking": - part := GeminiPart{ - Text: block.Thinking, - Thought: true, - } - // 保留原有 signature(Claude 模型需要有效的 signature) - if block.Signature != "" { - part.ThoughtSignature = block.Signature - } else if !allowDummyThought { - // Claude 模型需要有效 signature,跳过无 signature 的 thinking block - log.Printf("Warning: skipping thinking block without signature for Claude model") + if allowDummyThought { + // Gemini 模型可以使用 dummy signature + parts = append(parts, GeminiPart{ + Text: block.Thinking, + Thought: true, + ThoughtSignature: dummyThoughtSignature, + }) continue - } else { - // Gemini 模型使用 dummy signature - part.ThoughtSignature = dummyThoughtSignature } - parts = append(parts, part) + + // Claude 模型:仅在提供有效 signature 时保留 thinking block;否则跳过以避免上游校验失败。 + signature := strings.TrimSpace(block.Signature) + if signature == "" || signature == dummyThoughtSignature { + log.Printf("[Warning] Skipping thinking block for Claude model (missing or dummy signature)") + continue + } + if !isValidThoughtSignature(signature) { + log.Printf("[Debug] Thinking signature may be invalid (passing through anyway): len=%d", len(signature)) + } + parts = append(parts, GeminiPart{ + Text: block.Thinking, + Thought: true, + ThoughtSignature: signature, + }) case "image": if block.Source != nil && block.Source.Type == "base64" { @@ -239,10 +287,9 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu ID: block.ID, }, } - // 保留原有 signature,或对 Gemini 模型使用 dummy signature - if block.Signature != "" { - part.ThoughtSignature = block.Signature - } else if allowDummyThought { + // 只有 Gemini 模型使用 dummy signature + // Claude 模型不设置 signature(避免验证问题) + if allowDummyThought { part.ThoughtSignature = dummyThoughtSignature } parts = append(parts, part) @@ -386,9 +433,9 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 普通工具 var funcDecls []GeminiFunctionDecl - for _, tool := range tools { + for i, tool := range tools { // 跳过无效工具名称 - if tool.Name == "" { + if strings.TrimSpace(tool.Name) == "" { log.Printf("Warning: skipping tool with empty name") continue } @@ -397,10 +444,18 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { var inputSchema map[string]any // 检查是否为 custom 类型工具 (MCP) - if tool.Type == "custom" && tool.Custom != nil { - // Custom 格式: 从 custom 字段获取 description 和 input_schema + if tool.Type == "custom" { + if tool.Custom == nil || tool.Custom.InputSchema == nil { + log.Printf("[Warning] Skipping invalid custom tool '%s': missing custom spec or input_schema", tool.Name) + continue + } description = tool.Custom.Description inputSchema = tool.Custom.InputSchema + + // 调试日志:记录 custom 工具的 schema + if schemaJSON, err := json.Marshal(inputSchema); err == nil { + log.Printf("[Debug] Tool[%d] '%s' (custom) original schema: %s", i, tool.Name, string(schemaJSON)) + } } else { // 标准格式: 从顶层字段获取 description = tool.Description @@ -409,7 +464,6 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 清理 JSON Schema params := cleanJSONSchema(inputSchema) - // 为 nil schema 提供默认值 if params == nil { params = map[string]any{ @@ -418,6 +472,11 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { } } + // 调试日志:记录清理后的 schema + if paramsJSON, err := json.Marshal(params); err == nil { + log.Printf("[Debug] Tool[%d] '%s' cleaned schema: %s", i, tool.Name, string(paramsJSON)) + } + funcDecls = append(funcDecls, GeminiFunctionDecl{ Name: tool.Name, Description: description, @@ -479,31 +538,64 @@ func cleanJSONSchema(schema map[string]any) map[string]any { } // excludedSchemaKeys 不支持的 schema 字段 +// 基于 Claude API (Vertex AI) 的实际支持情况 +// 支持: type, description, enum, properties, required, additionalProperties, items +// 不支持: minItems, maxItems, minLength, maxLength, pattern, minimum, maximum 等验证字段 var excludedSchemaKeys = map[string]bool{ - "$schema": true, - "$id": true, - "$ref": true, - "additionalProperties": true, - "minLength": true, - "maxLength": true, - "minItems": true, - "maxItems": true, - "uniqueItems": true, - "minimum": true, - "maximum": true, - "exclusiveMinimum": true, - "exclusiveMaximum": true, - "pattern": true, - "format": true, - "default": true, - "strict": true, - "const": true, - "examples": true, - "deprecated": true, - "readOnly": true, - "writeOnly": true, - "contentMediaType": true, - "contentEncoding": true, + // 元 schema 字段 + "$schema": true, + "$id": true, + "$ref": true, + + // 字符串验证(Gemini 不支持) + "minLength": true, + "maxLength": true, + "pattern": true, + + // 数字验证(Claude API 通过 Vertex AI 不支持这些字段) + "minimum": true, + "maximum": true, + "exclusiveMinimum": true, + "exclusiveMaximum": true, + "multipleOf": true, + + // 数组验证(Claude API 通过 Vertex AI 不支持这些字段) + "uniqueItems": true, + "minItems": true, + "maxItems": true, + + // 组合 schema(Gemini 不支持) + "oneOf": true, + "anyOf": true, + "allOf": true, + "not": true, + "if": true, + "then": true, + "else": true, + "$defs": true, + "definitions": true, + + // 对象验证(仅保留 properties/required/additionalProperties) + "minProperties": true, + "maxProperties": true, + "patternProperties": true, + "propertyNames": true, + "dependencies": true, + "dependentSchemas": true, + "dependentRequired": true, + + // 其他不支持的字段 + "default": true, + "const": true, + "examples": true, + "deprecated": true, + "readOnly": true, + "writeOnly": true, + "contentMediaType": true, + "contentEncoding": true, + + // Claude 特有字段 + "strict": true, } // cleanSchemaValue 递归清理 schema 值 @@ -523,6 +615,31 @@ func cleanSchemaValue(value any) any { continue } + // 特殊处理 format 字段:只保留 Gemini 支持的 format 值 + if k == "format" { + if formatStr, ok := val.(string); ok { + // Gemini 只支持 date-time, date, time + if formatStr == "date-time" || formatStr == "date" || formatStr == "time" { + result[k] = val + } + // 其他 format 值直接跳过 + } + continue + } + + // 特殊处理 additionalProperties:Claude API 只支持布尔值,不支持 schema 对象 + if k == "additionalProperties" { + if boolVal, ok := val.(bool); ok { + result[k] = boolVal + log.Printf("[Debug] additionalProperties is bool: %v", boolVal) + } else { + // 如果是 schema 对象,转换为 false(更安全的默认值) + result[k] = false + log.Printf("[Debug] additionalProperties is not bool (type: %T), converting to false", val) + } + continue + } + // 递归清理所有值 result[k] = cleanSchemaValue(val) } diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go new file mode 100644 index 00000000..56eebad0 --- /dev/null +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -0,0 +1,179 @@ +package antigravity + +import ( + "encoding/json" + "testing" +) + +// TestBuildParts_ThinkingBlockWithoutSignature 测试thinking block无signature时的处理 +func TestBuildParts_ThinkingBlockWithoutSignature(t *testing.T) { + tests := []struct { + name string + content string + allowDummyThought bool + expectedParts int + description string + }{ + { + name: "Claude model - skip thinking block without signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 2, // 只有两个text block + description: "Claude模型应该跳过无signature的thinking block", + }, + { + name: "Claude model - keep thinking block with signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": "valid_sig"}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 3, // 三个block都保留 + description: "Claude模型应该保留有signature的thinking block", + }, + { + name: "Gemini model - use dummy signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: true, + expectedParts: 3, // 三个block都保留,thinking使用dummy signature + description: "Gemini模型应该为无signature的thinking block使用dummy signature", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + toolIDToName := make(map[string]string) + parts, err := buildParts(json.RawMessage(tt.content), toolIDToName, tt.allowDummyThought) + + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + + if len(parts) != tt.expectedParts { + t.Errorf("%s: got %d parts, want %d parts", tt.description, len(parts), tt.expectedParts) + } + }) + } +} + +// TestBuildTools_CustomTypeTools 测试custom类型工具转换 +func TestBuildTools_CustomTypeTools(t *testing.T) { + tests := []struct { + name string + tools []ClaudeTool + expectedLen int + description string + }{ + { + name: "Standard tool format", + tools: []ClaudeTool{ + { + Name: "get_weather", + Description: "Get weather information", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, + }, + }, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "mcp_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "MCP tool description", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "param": map[string]any{"type": "string"}, + }, + }, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从Custom字段读取description和input_schema", + }, + { + name: "Mixed standard and custom tools", + tools: []ClaudeTool{ + { + Name: "standard_tool", + Description: "Standard tool", + InputSchema: map[string]any{"type": "object"}, + }, + { + Type: "custom", + Name: "custom_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "Custom tool", + InputSchema: map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, // 返回一个GeminiToolDeclaration,包含2个function declarations + description: "混合标准和custom工具应该都能正确转换", + }, + { + name: "Invalid custom tool - nil Custom field", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + // Custom 为 nil + }, + }, + expectedLen: 0, // 应该被跳过 + description: "Custom字段为nil的custom工具应该被跳过", + }, + { + name: "Invalid custom tool - nil InputSchema", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + Custom: &ClaudeCustomToolSpec{ + Description: "Invalid", + // InputSchema 为 nil + }, + }, + }, + expectedLen: 0, // 应该被跳过 + description: "InputSchema为nil的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildTools(tt.tools) + + if len(result) != tt.expectedLen { + t.Errorf("%s: got %d tool declarations, want %d", tt.description, len(result), tt.expectedLen) + } + + // 验证function declarations存在 + if len(result) > 0 && result[0].FunctionDeclarations != nil { + if len(result[0].FunctionDeclarations) != len(tt.tools) { + t.Errorf("%s: got %d function declarations, want %d", + tt.description, len(result[0].FunctionDeclarations), len(tt.tools)) + } + } + }) + } +} diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 97ad6c83..0db3ed4a 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -16,6 +16,12 @@ const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleav // HaikuBetaHeader Haiku 模型使用的 anthropic-beta header(不需要 claude-code beta) const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking +// ApiKeyBetaHeader API-key 账号建议使用的 anthropic-beta header(不包含 oauth) +const ApiKeyBetaHeader = BetaClaudeCode + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming + +// ApiKeyHaikuBetaHeader Haiku 模型在 API-key 账号下使用的 anthropic-beta header(不包含 oauth / claude-code) +const ApiKeyHaikuBetaHeader = BetaInterleavedThinking + // Claude Code 客户端默认请求头 var DefaultHeaders = map[string]string{ "User-Agent": "claude-cli/2.0.62 (external, cli)", diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index ae2976f8..5b3bf565 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -358,6 +358,15 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return nil, fmt.Errorf("transform request: %w", err) } + // 调试:记录转换后的请求体(仅记录前 2000 字符) + if bodyJSON, err := json.Marshal(geminiBody); err == nil { + truncated := string(bodyJSON) + if len(truncated) > 2000 { + truncated = truncated[:2000] + "..." + } + log.Printf("[Debug] Transformed Gemini request: %s", truncated) + } + // 构建上游 action action := "generateContent" if claudeReq.Stream { diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index d542e9c2..5884602d 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/gin-gonic/gin" @@ -684,6 +685,30 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 处理错误响应(不可重试的错误) if resp.StatusCode >= 400 { + // 可选:对部分 400 触发 failover(默认关闭以保持语义) + if resp.StatusCode == 400 && s.cfg != nil && s.cfg.Gateway.FailoverOn400 { + respBody, readErr := io.ReadAll(resp.Body) + if readErr != nil { + // ReadAll failed, fall back to normal error handling without consuming the stream + return s.handleErrorResponse(ctx, resp, c, account) + } + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + + if s.shouldFailoverOn400(respBody) { + if s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Account %d: 400 error, attempting failover: %s", + account.ID, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } else { + log.Printf("Account %d: 400 error, attempting failover", account.ID) + } + s.handleFailoverSideEffects(ctx, resp, account) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + } return s.handleErrorResponse(ctx, resp, c, account) } @@ -786,6 +811,13 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { + // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) + if requestNeedsBetaFeatures(body) { + if beta := defaultApiKeyBetaHeader(body); beta != "" { + req.Header.Set("anthropic-beta", beta) + } + } } return req, nil @@ -838,6 +870,83 @@ func (s *GatewayService) getBetaHeader(modelID string, clientBetaHeader string) return claude.DefaultBetaHeader } +func requestNeedsBetaFeatures(body []byte) bool { + tools := gjson.GetBytes(body, "tools") + if tools.Exists() && tools.IsArray() && len(tools.Array()) > 0 { + return true + } + if strings.EqualFold(gjson.GetBytes(body, "thinking.type").String(), "enabled") { + return true + } + return false +} + +func defaultApiKeyBetaHeader(body []byte) string { + modelID := gjson.GetBytes(body, "model").String() + if strings.Contains(strings.ToLower(modelID), "haiku") { + return claude.ApiKeyHaikuBetaHeader + } + return claude.ApiKeyBetaHeader +} + +func truncateForLog(b []byte, maxBytes int) string { + if maxBytes <= 0 { + maxBytes = 2048 + } + if len(b) > maxBytes { + b = b[:maxBytes] + } + s := string(b) + // 保持一行,避免污染日志格式 + s = strings.ReplaceAll(s, "\n", "\\n") + s = strings.ReplaceAll(s, "\r", "\\r") + return s +} + +func (s *GatewayService) shouldFailoverOn400(respBody []byte) bool { + // 只对“可能是兼容性差异导致”的 400 允许切换,避免无意义重试。 + // 默认保守:无法识别则不切换。 + msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if msg == "" { + return false + } + + // 缺少/错误的 beta header:换账号/链路可能成功(尤其是混合调度时)。 + // 更精确匹配 beta 相关的兼容性问题,避免误触发切换。 + if strings.Contains(msg, "anthropic-beta") || + strings.Contains(msg, "beta feature") || + strings.Contains(msg, "requires beta") { + return true + } + + // thinking/tool streaming 等兼容性约束(常见于中间转换链路) + if strings.Contains(msg, "thinking") || strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") { + return true + } + if strings.Contains(msg, "tool_use") || strings.Contains(msg, "tool_result") || strings.Contains(msg, "tools") { + return true + } + + return false +} + +func extractUpstreamErrorMessage(body []byte) string { + // Claude 风格:{"type":"error","error":{"type":"...","message":"..."}} + if m := gjson.GetBytes(body, "error.message").String(); strings.TrimSpace(m) != "" { + inner := strings.TrimSpace(m) + // 有些上游会把完整 JSON 作为字符串塞进 message + if strings.HasPrefix(inner, "{") { + if innerMsg := gjson.Get(inner, "error.message").String(); strings.TrimSpace(innerMsg) != "" { + return innerMsg + } + } + return m + } + + // 兜底:尝试顶层 message + return gjson.GetBytes(body, "message").String() +} + func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { body, _ := io.ReadAll(resp.Body) @@ -850,6 +959,16 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res switch resp.StatusCode { case 400: + // 仅记录上游错误摘要(避免输出请求内容);需要时可通过配置打开 + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream 400 error (account=%d platform=%s type=%s): %s", + account.ID, + account.Platform, + account.Type, + truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } c.Data(http.StatusBadRequest, "application/json", body) return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) case 401: @@ -1329,6 +1448,18 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, // 标记账号状态(429/529等) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + // 记录上游错误摘要便于排障(不回显请求内容) + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "count_tokens upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + // 返回简化的错误响应 errMsg := "Upstream request failed" switch resp.StatusCode { @@ -1409,6 +1540,13 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { + // API-key:与 messages 同步的按需 beta 注入(默认关闭) + if requestNeedsBetaFeatures(body) { + if beta := defaultApiKeyBetaHeader(body); beta != "" { + req.Header.Set("anthropic-beta", beta) + } + } } return req, nil diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index a0bf1b6a..b1877800 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2278,11 +2278,13 @@ func convertClaudeToolsToGeminiTools(tools any) []any { "properties": map[string]any{}, } } + // 清理 JSON Schema + cleanedParams := cleanToolSchema(params) funcDecls = append(funcDecls, map[string]any{ "name": name, "description": desc, - "parameters": params, + "parameters": cleanedParams, }) } @@ -2296,6 +2298,41 @@ func convertClaudeToolsToGeminiTools(tools any) []any { } } +// cleanToolSchema 清理工具的 JSON Schema,移除 Gemini 不支持的字段 +func cleanToolSchema(schema any) any { + if schema == nil { + return nil + } + + switch v := schema.(type) { + case map[string]any: + cleaned := make(map[string]any) + for key, value := range v { + // 跳过不支持的字段 + if key == "$schema" || key == "$id" || key == "$ref" || + key == "additionalProperties" || key == "minLength" || + key == "maxLength" || key == "minItems" || key == "maxItems" { + continue + } + // 递归清理嵌套对象 + cleaned[key] = cleanToolSchema(value) + } + // 规范化 type 字段为大写 + if typeVal, ok := cleaned["type"].(string); ok { + cleaned["type"] = strings.ToUpper(typeVal) + } + return cleaned + case []any: + cleaned := make([]any, len(v)) + for i, item := range v { + cleaned[i] = cleanToolSchema(item) + } + return cleaned + default: + return v + } +} + func convertClaudeGenerationConfig(req map[string]any) map[string]any { out := make(map[string]any) if mt, ok := asInt(req["max_tokens"]); ok && mt > 0 { diff --git a/backend/internal/service/gemini_messages_compat_service_test.go b/backend/internal/service/gemini_messages_compat_service_test.go new file mode 100644 index 00000000..d49f2eb3 --- /dev/null +++ b/backend/internal/service/gemini_messages_compat_service_test.go @@ -0,0 +1,128 @@ +package service + +import ( + "testing" +) + +// TestConvertClaudeToolsToGeminiTools_CustomType 测试custom类型工具转换 +func TestConvertClaudeToolsToGeminiTools_CustomType(t *testing.T) { + tests := []struct { + name string + tools any + expectedLen int + description string + }{ + { + name: "Standard tools", + tools: []any{ + map[string]any{ + "name": "get_weather", + "description": "Get weather info", + "input_schema": map[string]any{"type": "object"}, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "mcp_tool", + "custom": map[string]any{ + "description": "MCP tool description", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从custom字段读取", + }, + { + name: "Mixed standard and custom tools", + tools: []any{ + map[string]any{ + "name": "standard_tool", + "description": "Standard", + "input_schema": map[string]any{"type": "object"}, + }, + map[string]any{ + "type": "custom", + "name": "custom_tool", + "custom": map[string]any{ + "description": "Custom", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "混合工具应该都能正确转换", + }, + { + name: "Custom tool without custom field", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "invalid_custom", + // 缺少 custom 字段 + }, + }, + expectedLen: 0, // 应该被跳过 + description: "缺少custom字段的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := convertClaudeToolsToGeminiTools(tt.tools) + + if tt.expectedLen == 0 { + if result != nil { + t.Errorf("%s: expected nil result, got %v", tt.description, result) + } + return + } + + if result == nil { + t.Fatalf("%s: expected non-nil result", tt.description) + } + + if len(result) != 1 { + t.Errorf("%s: expected 1 tool declaration, got %d", tt.description, len(result)) + return + } + + toolDecl, ok := result[0].(map[string]any) + if !ok { + t.Fatalf("%s: result[0] is not map[string]any", tt.description) + } + + funcDecls, ok := toolDecl["functionDeclarations"].([]any) + if !ok { + t.Fatalf("%s: functionDeclarations is not []any", tt.description) + } + + toolsArr, _ := tt.tools.([]any) + expectedFuncCount := 0 + for _, tool := range toolsArr { + toolMap, _ := tool.(map[string]any) + if toolMap["name"] != "" { + // 检查是否为有效的custom工具 + if toolMap["type"] == "custom" { + if toolMap["custom"] != nil { + expectedFuncCount++ + } + } else { + expectedFuncCount++ + } + } + } + + if len(funcDecls) != expectedFuncCount { + t.Errorf("%s: expected %d function declarations, got %d", + tt.description, expectedFuncCount, len(funcDecls)) + } + }) + } +} diff --git a/backend/internal/service/gemini_oauth_service.go b/backend/internal/service/gemini_oauth_service.go index e4bda5f8..221bd0f2 100644 --- a/backend/internal/service/gemini_oauth_service.go +++ b/backend/internal/service/gemini_oauth_service.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "regexp" "strconv" "strings" "time" @@ -163,6 +164,45 @@ type GeminiTokenInfo struct { Scope string `json:"scope,omitempty"` ProjectID string `json:"project_id,omitempty"` OAuthType string `json:"oauth_type,omitempty"` // "code_assist" 或 "ai_studio" + TierID string `json:"tier_id,omitempty"` // Gemini Code Assist tier: LEGACY/PRO/ULTRA +} + +// validateTierID validates tier_id format and length +func validateTierID(tierID string) error { + if tierID == "" { + return nil // Empty is allowed + } + if len(tierID) > 64 { + return fmt.Errorf("tier_id exceeds maximum length of 64 characters") + } + // Allow alphanumeric, underscore, hyphen, and slash (for tier paths) + if !regexp.MustCompile(`^[a-zA-Z0-9_/-]+$`).MatchString(tierID) { + return fmt.Errorf("tier_id contains invalid characters") + } + return nil +} + +// extractTierIDFromAllowedTiers extracts tierID from LoadCodeAssist response +// Prioritizes IsDefault tier, falls back to first non-empty tier +func extractTierIDFromAllowedTiers(allowedTiers []geminicli.AllowedTier) string { + tierID := "LEGACY" + // First pass: look for default tier + for _, tier := range allowedTiers { + if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + // Second pass: if still LEGACY, take first non-empty tier + if tierID == "LEGACY" { + for _, tier := range allowedTiers { + if strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + } + return tierID } func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) { @@ -223,13 +263,14 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 projectID := sessionProjectID + var tierID string // 对于 code_assist 模式,project_id 是必需的 // 对于 ai_studio 模式,project_id 是可选的(不影响使用 AI Studio API) if oauthType == "code_assist" { if projectID == "" { var err error - projectID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) + projectID, tierID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) if err != nil { // 记录警告但不阻断流程,允许后续补充 project_id fmt.Printf("[GeminiOAuth] Warning: Failed to fetch project_id during token exchange: %v\n", err) @@ -248,6 +289,7 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch ExpiresAt: expiresAt, Scope: tokenResp.Scope, ProjectID: projectID, + TierID: tierID, OAuthType: oauthType, }, nil } @@ -357,7 +399,7 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A // For Code Assist, project_id is required. Auto-detect if missing. // For AI Studio OAuth, project_id is optional and should not block refresh. if oauthType == "code_assist" && strings.TrimSpace(tokenInfo.ProjectID) == "" { - projectID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) + projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) if err != nil { return nil, fmt.Errorf("failed to auto-detect project_id: %w", err) } @@ -366,6 +408,7 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A return nil, fmt.Errorf("failed to auto-detect project_id: empty result") } tokenInfo.ProjectID = projectID + tokenInfo.TierID = tierID } return tokenInfo, nil @@ -388,6 +431,13 @@ func (s *GeminiOAuthService) BuildAccountCredentials(tokenInfo *GeminiTokenInfo) if tokenInfo.ProjectID != "" { creds["project_id"] = tokenInfo.ProjectID } + if tokenInfo.TierID != "" { + // Validate tier_id before storing + if err := validateTierID(tokenInfo.TierID); err == nil { + creds["tier_id"] = tokenInfo.TierID + } + // Silently skip invalid tier_id (don't block account creation) + } if tokenInfo.OAuthType != "" { creds["oauth_type"] = tokenInfo.OAuthType } @@ -398,34 +448,26 @@ func (s *GeminiOAuthService) Stop() { s.sessionStore.Stop() } -func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, error) { +func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, string, error) { if s.codeAssist == nil { - return "", errors.New("code assist client not configured") + return "", "", errors.New("code assist client not configured") } loadResp, loadErr := s.codeAssist.LoadCodeAssist(ctx, accessToken, proxyURL, nil) + + // Extract tierID from response (works whether CloudAICompanionProject is set or not) + tierID := "LEGACY" + if loadResp != nil { + tierID = extractTierIDFromAllowedTiers(loadResp.AllowedTiers) + } + + // If LoadCodeAssist returned a project, use it if loadErr == nil && loadResp != nil && strings.TrimSpace(loadResp.CloudAICompanionProject) != "" { - return strings.TrimSpace(loadResp.CloudAICompanionProject), nil + return strings.TrimSpace(loadResp.CloudAICompanionProject), tierID, nil } // Pick tier from allowedTiers; if no default tier is marked, pick the first non-empty tier ID. - tierID := "LEGACY" - if loadResp != nil { - for _, tier := range loadResp.AllowedTiers { - if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - if strings.TrimSpace(tierID) == "" || tierID == "LEGACY" { - for _, tier := range loadResp.AllowedTiers { - if strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - } - } + // (tierID already extracted above, reuse it) req := &geminicli.OnboardUserRequest{ TierID: tierID, @@ -443,39 +485,39 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr // If Code Assist onboarding fails (e.g. INVALID_ARGUMENT), fallback to Cloud Resource Manager projects. fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } - return "", err + return "", "", err } if resp.Done { if resp.Response != nil && resp.Response.CloudAICompanionProject != nil { switch v := resp.Response.CloudAICompanionProject.(type) { case string: - return strings.TrimSpace(v), nil + return strings.TrimSpace(v), tierID, nil case map[string]any: if id, ok := v["id"].(string); ok { - return strings.TrimSpace(id), nil + return strings.TrimSpace(id), tierID, nil } } } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } - return "", errors.New("onboardUser completed but no project_id returned") + return "", "", errors.New("onboardUser completed but no project_id returned") } time.Sleep(2 * time.Second) } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } if loadErr != nil { - return "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) + return "", "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) } - return "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) + return "", "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) } type googleCloudProject struct { diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go index 2195ec55..5f369de5 100644 --- a/backend/internal/service/gemini_token_provider.go +++ b/backend/internal/service/gemini_token_provider.go @@ -112,7 +112,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou } } - detected, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) + detected, tierID, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) if err != nil { log.Printf("[GeminiTokenProvider] Auto-detect project_id failed: %v, fallback to AI Studio API mode", err) return accessToken, nil @@ -123,6 +123,9 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou account.Credentials = make(map[string]any) } account.Credentials["project_id"] = detected + if tierID != "" { + account.Credentials["tier_id"] = tierID + } _ = p.accountRepo.Update(ctx, account) } } diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 5bd85d7d..5478d151 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -122,6 +122,21 @@ pricing: # Hash check interval in minutes hash_check_interval_minutes: 10 +# ============================================================================= +# Gateway (Optional) +# ============================================================================= +gateway: + # Wait time (in seconds) for upstream response headers (streaming body not affected) + response_header_timeout: 300 + # Log upstream error response body summary (safe/truncated; does not log request content) + log_upstream_error_body: false + # Max bytes to log from upstream error body + log_upstream_error_body_max_bytes: 2048 + # Auto inject anthropic-beta for API-key accounts when needed (default off) + inject_beta_for_apikey: false + # Allow failover on selected 400 errors (default off) + failover_on_400: false + # ============================================================================= # Gemini OAuth (Required for Gemini accounts) # ============================================================================= diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 6563ee0c..1770a985 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -952,6 +952,7 @@ "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~6.21.0" } @@ -1367,6 +1368,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1443,6 +1445,7 @@ "resolved": "https://registry.npmmirror.com/chart.js/-/chart.js-4.5.1.tgz", "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", "license": "MIT", + "peer": true, "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -2040,6 +2043,7 @@ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "dev": true, "license": "MIT", + "peer": true, "bin": { "jiti": "bin/jiti.js" } @@ -2348,6 +2352,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -2821,6 +2826,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2854,6 +2860,7 @@ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -2926,6 +2933,7 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -3097,6 +3105,7 @@ "resolved": "https://registry.npmmirror.com/vue/-/vue-3.5.25.tgz", "integrity": "sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==", "license": "MIT", + "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/compiler-sfc": "3.5.25", @@ -3190,6 +3199,7 @@ "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@volar/typescript": "2.4.15", "@vue/language-core": "2.2.12" diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index c1ca08fa..914678a5 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -83,6 +83,14 @@ > + + + + {{ tierDisplay }} + @@ -140,4 +148,23 @@ const statusText = computed(() => { return props.account.status }) +// Computed: tier display +const tierDisplay = computed(() => { + const credentials = props.account.credentials as Record | undefined + const tierId = credentials?.tier_id + if (!tierId || tierId === 'unknown') return null + + const tierMap: Record = { + 'free': 'Free', + 'payg': 'Pay-as-you-go', + 'pay-as-you-go': 'Pay-as-you-go', + 'enterprise': 'Enterprise', + 'LEGACY': 'Legacy', + 'PRO': 'Pro', + 'ULTRA': 'Ultra' + } + + return tierMap[tierId] || tierId +}) + From 9c88980483fabaca21ce41574a7355085ec73c3b Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 04:26:01 +0800 Subject: [PATCH 16/51] =?UTF-8?q?fix(lint):=20=E4=BF=AE=E5=A4=8D=20golangc?= =?UTF-8?q?i-lint=20=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 修复 gofmt 格式问题 - 修复 staticcheck SA4031 nil check 问题(只在成功时设置 release 函数) - 删除未使用的 sortAccountsByPriority 函数 --- backend/internal/handler/gateway_handler.go | 16 ++++++++++------ .../internal/handler/gemini_v1beta_handler.go | 8 +++++--- backend/internal/pkg/antigravity/claude_types.go | 2 +- backend/internal/service/gateway_service.go | 6 ------ 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 769e6700..70b42ffe 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -192,9 +192,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) return - } - accountWaitRelease = func() { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } else { + // Only set release function if increment succeeded + accountWaitRelease = func() { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } } accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( @@ -314,9 +316,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) return - } - accountWaitRelease = func() { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } else { + // Only set release function if increment succeeded + accountWaitRelease = func() { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } } accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 1959c0f3..93ab23c9 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -233,9 +233,11 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") return - } - accountWaitRelease = func() { - geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } else { + // Only set release function if increment succeeded + accountWaitRelease = func() { + geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } } accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout( diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index f394d7e3..01b805cd 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -41,7 +41,7 @@ type ClaudeMetadata struct { // 1. 标准格式: { "name": "...", "description": "...", "input_schema": {...} } // 2. Custom 格式 (MCP): { "type": "custom", "name": "...", "custom": { "description": "...", "input_schema": {...} } } type ClaudeTool struct { - Type string `json:"type,omitempty"` // "custom" 或空(标准格式) + Type string `json:"type,omitempty"` // "custom" 或空(标准格式) Name string `json:"name"` Description string `json:"description,omitempty"` // 标准格式使用 InputSchema map[string]any `json:"input_schema,omitempty"` // 标准格式使用 diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 6c45ff0f..af9342b1 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -681,12 +681,6 @@ func (s *GatewayService) tryAcquireAccountSlot(ctx context.Context, accountID in return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) } -func sortAccountsByPriority(accounts []*Account) { - sort.SliceStable(accounts, func(i, j int) bool { - return accounts[i].Priority < accounts[j].Priority - }) -} - func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { sort.SliceStable(accounts, func(i, j int) bool { a, b := accounts[i], accounts[j] From e49281774d6e654e570ccc55ecd81878c5d28d01 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Wed, 31 Dec 2025 23:57:01 +0800 Subject: [PATCH 17/51] =?UTF-8?q?fix(gemini):=20=E4=BF=AE=E5=A4=8D=20P0/P1?= =?UTF-8?q?=20=E7=BA=A7=E5=88=AB=E9=97=AE=E9=A2=98=EF=BC=88429=E8=AF=AF?= =?UTF-8?q?=E5=88=A4/Tier=E4=B8=A2=E5=A4=B1/expires=5Fat/=E5=89=8D?= =?UTF-8?q?=E7=AB=AF=E4=B8=80=E8=87=B4=E6=80=A7=EF=BC=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit P0 修复(Critical - 影响生产稳定性): - 修复 429 判断逻辑:使用 project_id 判断而非 account.Type 防止 AI Studio OAuth 被误判为 Code Assist 5分钟窗口 - 修复 Tier ID 丢失:刷新时始终保留旧值,默认 LEGACY 防止 fetchProjectID 失败导致 tier_id 被清空 - 修复 expires_at 下界:添加 minTTL=30s 保护 防止 expires_in <= 300 时生成过去时间引发刷新风暴 P1 修复(Important - 行为一致性): - 前端 isCodeAssist 判断与后端一致(支持 legacy) - 前端日期解析添加 NaN 保护 - 迁移脚本覆盖 legacy 账号 前端功能(新增): - AccountQuotaInfo 组件:Tier Badge + 二元进度条 + 倒计时 - 定时器动态管理:watch 监听限流状态 - 类型定义:GeminiCredentials 接口 测试: - ✅ TypeScript 类型检查通过 - ✅ 前端构建成功(3.33s) - ✅ Gemini + Codex 双 AI 审查通过 Refs: #gemini-quota --- .../service/gemini_messages_compat_service.go | 38 +++- .../internal/service/gemini_oauth_service.go | 81 ++++++-- backend/migrations/017_add_gemini_tier_id.sql | 30 +++ .../components/account/AccountQuotaInfo.vue | 194 ++++++++++++++++++ .../components/account/AccountUsageCell.vue | 12 +- frontend/src/types/index.ts | 16 ++ 6 files changed, 349 insertions(+), 22 deletions(-) create mode 100644 backend/migrations/017_add_gemini_tier_id.sql create mode 100644 frontend/src/components/account/AccountQuotaInfo.vue diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index b1877800..111ff462 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -1886,13 +1886,47 @@ func (s *GeminiMessagesCompatService) handleGeminiUpstreamError(ctx context.Cont if statusCode != 429 { return } + + // 获取账号的 oauth_type、tier_id 和 project_id + oauthType := strings.TrimSpace(account.GetCredential("oauth_type")) + tierID := strings.TrimSpace(account.GetCredential("tier_id")) + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + // 判断是否为 Code Assist:以 project_id 是否存在为准(更可靠) + isCodeAssist := projectID != "" + // Legacy 兼容:oauth_type 为空但 project_id 存在时视为 code_assist + if oauthType == "" && isCodeAssist { + oauthType = "code_assist" + } + resetAt := ParseGeminiRateLimitResetTime(body) if resetAt == nil { - ra := time.Now().Add(5 * time.Minute) + // 根据账号类型使用不同的默认重置时间 + var ra time.Time + if isCodeAssist { + // Code Assist: 5 分钟滚动窗口 + ra = time.Now().Add(5 * time.Minute) + log.Printf("[Gemini 429] Account %d (Code Assist, tier=%s, project=%s) rate limited, reset in 5min", account.ID, tierID, projectID) + } else { + // API Key / AI Studio OAuth: PST 午夜 + if ts := nextGeminiDailyResetUnix(); ts != nil { + ra = time.Unix(*ts, 0) + log.Printf("[Gemini 429] Account %d (API Key/AI Studio, type=%s) rate limited, reset at PST midnight (%v)", account.ID, account.Type, ra) + } else { + // 兜底:5 分钟 + ra = time.Now().Add(5 * time.Minute) + log.Printf("[Gemini 429] Account %d rate limited, fallback to 5min", account.ID) + } + } _ = s.accountRepo.SetRateLimited(ctx, account.ID, ra) return } - _ = s.accountRepo.SetRateLimited(ctx, account.ID, time.Unix(*resetAt, 0)) + + // 使用解析到的重置时间 + resetTime := time.Unix(*resetAt, 0) + _ = s.accountRepo.SetRateLimited(ctx, account.ID, resetTime) + log.Printf("[Gemini 429] Account %d rate limited until %v (oauth_type=%s, tier=%s)", + account.ID, resetTime, oauthType, tierID) } // ParseGeminiRateLimitResetTime 解析 Gemini 格式的 429 响应,返回重置时间的 Unix 时间戳 diff --git a/backend/internal/service/gemini_oauth_service.go b/backend/internal/service/gemini_oauth_service.go index 221bd0f2..d1c1c5f6 100644 --- a/backend/internal/service/gemini_oauth_service.go +++ b/backend/internal/service/gemini_oauth_service.go @@ -259,8 +259,15 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch sessionProjectID := strings.TrimSpace(session.ProjectID) s.sessionStore.Delete(input.SessionID) - // 计算过期时间时减去 5 分钟安全时间窗口,考虑网络延迟和时钟偏差 - expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 + // 计算过期时间:减去 5 分钟安全时间窗口(考虑网络延迟和时钟偏差) + // 同时设置下界保护,防止 expires_in 过小导致过去时间(引发刷新风暴) + const safetyWindow = 300 // 5 minutes + const minTTL = 30 // minimum 30 seconds + expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - safetyWindow + minExpiresAt := time.Now().Unix() + minTTL + if expiresAt < minExpiresAt { + expiresAt = minExpiresAt + } projectID := sessionProjectID var tierID string @@ -275,10 +282,22 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch // 记录警告但不阻断流程,允许后续补充 project_id fmt.Printf("[GeminiOAuth] Warning: Failed to fetch project_id during token exchange: %v\n", err) } + } else { + // 用户手动填了 project_id,仍需调用 LoadCodeAssist 获取 tierID + _, fetchedTierID, err := s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) + if err != nil { + fmt.Printf("[GeminiOAuth] Warning: Failed to fetch tierID: %v\n", err) + } else { + tierID = fetchedTierID + } } if strings.TrimSpace(projectID) == "" { return nil, fmt.Errorf("missing project_id for Code Assist OAuth: please fill Project ID (optional field) and regenerate the auth URL, or ensure your Google account has an ACTIVE GCP project") } + // tierID 缺失时使用默认值 + if tierID == "" { + tierID = "LEGACY" + } } return &GeminiTokenInfo{ @@ -308,8 +327,15 @@ func (s *GeminiOAuthService) RefreshToken(ctx context.Context, oauthType, refres tokenResp, err := s.oauthClient.RefreshToken(ctx, oauthType, refreshToken, proxyURL) if err == nil { - // 计算过期时间时减去 5 分钟安全时间窗口,考虑网络延迟和时钟偏差 - expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 + // 计算过期时间:减去 5 分钟安全时间窗口(考虑网络延迟和时钟偏差) + // 同时设置下界保护,防止 expires_in 过小导致过去时间(引发刷新风暴) + const safetyWindow = 300 // 5 minutes + const minTTL = 30 // minimum 30 seconds + expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - safetyWindow + minExpiresAt := time.Now().Unix() + minTTL + if expiresAt < minExpiresAt { + expiresAt = minExpiresAt + } return &GeminiTokenInfo{ AccessToken: tokenResp.AccessToken, RefreshToken: tokenResp.RefreshToken, @@ -396,19 +422,39 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A tokenInfo.ProjectID = existingProjectID } + // 尝试从账号凭证获取 tierID(向后兼容) + existingTierID := strings.TrimSpace(account.GetCredential("tier_id")) + // For Code Assist, project_id is required. Auto-detect if missing. // For AI Studio OAuth, project_id is optional and should not block refresh. - if oauthType == "code_assist" && strings.TrimSpace(tokenInfo.ProjectID) == "" { - projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) - if err != nil { - return nil, fmt.Errorf("failed to auto-detect project_id: %w", err) + if oauthType == "code_assist" { + // 先设置默认值或保留旧值,确保 tier_id 始终有值 + if existingTierID != "" { + tokenInfo.TierID = existingTierID + } else { + tokenInfo.TierID = "LEGACY" // 默认值 } - projectID = strings.TrimSpace(projectID) - if projectID == "" { + + // 尝试自动探测 project_id 和 tier_id + needDetect := strings.TrimSpace(tokenInfo.ProjectID) == "" || existingTierID == "" + if needDetect { + projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) + if err != nil { + fmt.Printf("[GeminiOAuth] Warning: failed to auto-detect project/tier: %v\n", err) + } else { + if strings.TrimSpace(tokenInfo.ProjectID) == "" && projectID != "" { + tokenInfo.ProjectID = projectID + } + // 只有当原来没有 tier_id 且探测成功时才更新 + if existingTierID == "" && tierID != "" { + tokenInfo.TierID = tierID + } + } + } + + if strings.TrimSpace(tokenInfo.ProjectID) == "" { return nil, fmt.Errorf("failed to auto-detect project_id: empty result") } - tokenInfo.ProjectID = projectID - tokenInfo.TierID = tierID } return tokenInfo, nil @@ -466,9 +512,6 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr return strings.TrimSpace(loadResp.CloudAICompanionProject), tierID, nil } - // Pick tier from allowedTiers; if no default tier is marked, pick the first non-empty tier ID. - // (tierID already extracted above, reuse it) - req := &geminicli.OnboardUserRequest{ TierID: tierID, Metadata: geminicli.LoadCodeAssistMetadata{ @@ -487,7 +530,7 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr if fbErr == nil && strings.TrimSpace(fallback) != "" { return strings.TrimSpace(fallback), tierID, nil } - return "", "", err + return "", tierID, err } if resp.Done { if resp.Response != nil && resp.Response.CloudAICompanionProject != nil { @@ -505,7 +548,7 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr if fbErr == nil && strings.TrimSpace(fallback) != "" { return strings.TrimSpace(fallback), tierID, nil } - return "", "", errors.New("onboardUser completed but no project_id returned") + return "", tierID, errors.New("onboardUser completed but no project_id returned") } time.Sleep(2 * time.Second) } @@ -515,9 +558,9 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr return strings.TrimSpace(fallback), tierID, nil } if loadErr != nil { - return "", "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) + return "", tierID, fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) } - return "", "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) + return "", tierID, fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) } type googleCloudProject struct { diff --git a/backend/migrations/017_add_gemini_tier_id.sql b/backend/migrations/017_add_gemini_tier_id.sql new file mode 100644 index 00000000..0388a412 --- /dev/null +++ b/backend/migrations/017_add_gemini_tier_id.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin +-- 为 Gemini Code Assist OAuth 账号添加默认 tier_id +-- 包括显式标记为 code_assist 的账号,以及 legacy 账号(oauth_type 为空但 project_id 存在) +UPDATE accounts +SET credentials = jsonb_set( + credentials, + '{tier_id}', + '"LEGACY"', + true +) +WHERE platform = 'gemini' + AND type = 'oauth' + AND jsonb_typeof(credentials) = 'object' + AND credentials->>'tier_id' IS NULL + AND ( + credentials->>'oauth_type' = 'code_assist' + OR (credentials->>'oauth_type' IS NULL AND credentials->>'project_id' IS NOT NULL) + ); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- 回滚:删除 tier_id 字段 +UPDATE accounts +SET credentials = credentials - 'tier_id' +WHERE platform = 'gemini' + AND type = 'oauth' + AND credentials->>'oauth_type' = 'code_assist'; +-- +goose StatementEnd diff --git a/frontend/src/components/account/AccountQuotaInfo.vue b/frontend/src/components/account/AccountQuotaInfo.vue new file mode 100644 index 00000000..44fe1b41 --- /dev/null +++ b/frontend/src/components/account/AccountQuotaInfo.vue @@ -0,0 +1,194 @@ + + + diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue index d064c55a..d457c2ff 100644 --- a/frontend/src/components/account/AccountUsageCell.vue +++ b/frontend/src/components/account/AccountUsageCell.vue @@ -169,6 +169,11 @@
-
+ + + From c5c12d4c8b44cbfecf2ee22ae3fd7810f724c638 Mon Sep 17 00:00:00 2001 From: Wesley Liddick Date: Wed, 31 Dec 2025 21:45:42 -0500 Subject: [PATCH 32/51] =?UTF-8?q?Revert=20"feat(gateway):=20=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E8=B4=9F=E8=BD=BD=E6=84=9F=E7=9F=A5=E7=9A=84=E8=B4=A6?= =?UTF-8?q?=E5=8F=B7=E8=B0=83=E5=BA=A6=E4=BC=98=E5=8C=96=20(#114)"=20(#117?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 8d252303fc4a6325956234079ce3fb676f680595. --- backend/cmd/server/wire_gen.go | 6 +- backend/internal/config/config.go | 57 -- backend/internal/config/config_test.go | 49 +- backend/internal/handler/gateway_handler.go | 112 +--- backend/internal/handler/gateway_helper.go | 22 +- .../internal/handler/gemini_v1beta_handler.go | 53 +- .../handler/openai_gateway_handler.go | 51 +- .../internal/pkg/antigravity/claude_types.go | 3 - .../pkg/antigravity/request_transformer.go | 223 ++------ .../antigravity/request_transformer_test.go | 179 ------ backend/internal/pkg/claude/constants.go | 6 - .../internal/repository/concurrency_cache.go | 185 +------ .../concurrency_cache_benchmark_test.go | 2 +- .../concurrency_cache_integration_test.go | 177 +----- backend/internal/repository/wire.go | 9 +- .../service/antigravity_gateway_service.go | 9 - .../internal/service/concurrency_service.go | 110 ---- .../service/gateway_multiplatform_test.go | 211 ------- backend/internal/service/gateway_service.go | 519 +----------------- .../service/gemini_messages_compat_service.go | 39 +- .../gemini_messages_compat_service_test.go | 128 ----- .../internal/service/gemini_oauth_service.go | 104 ++-- .../internal/service/gemini_token_provider.go | 5 +- .../service/openai_gateway_service.go | 260 --------- backend/internal/service/wire.go | 11 +- deploy/config.example.yaml | 15 - deploy/flow.md | 222 -------- frontend/package-lock.json | 10 - .../account/AccountStatusIndicator.vue | 27 - 29 files changed, 133 insertions(+), 2671 deletions(-) delete mode 100644 backend/internal/pkg/antigravity/request_transformer_test.go delete mode 100644 backend/internal/service/gemini_messages_compat_service_test.go delete mode 100644 deploy/flow.md diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 1adabefe..83cba823 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -99,7 +99,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream) accountTestService := service.NewAccountTestService(accountRepository, oAuthService, openAIOAuthService, geminiTokenProvider, antigravityGatewayService, httpUpstream) concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) - concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) + concurrencyService := service.NewConcurrencyService(concurrencyCache) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService) accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService) oAuthHandler := admin.NewOAuthHandler(oAuthService) @@ -127,10 +127,10 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { identityService := service.NewIdentityService(identityCache) timingWheelService := service.ProvideTimingWheelService() deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) - gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService) + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService) geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService) gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService) - openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) + openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 7927fec5..aeeddcb4 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -3,7 +3,6 @@ package config import ( "fmt" "strings" - "time" "github.com/spf13/viper" ) @@ -120,37 +119,6 @@ type GatewayConfig struct { // ConcurrencySlotTTLMinutes: 并发槽位过期时间(分钟) // 应大于最长 LLM 请求时间,防止请求完成前槽位过期 ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"` - - // 是否记录上游错误响应体摘要(避免输出请求内容) - LogUpstreamErrorBody bool `mapstructure:"log_upstream_error_body"` - // 上游错误响应体记录最大字节数(超过会截断) - LogUpstreamErrorBodyMaxBytes int `mapstructure:"log_upstream_error_body_max_bytes"` - - // API-key 账号在客户端未提供 anthropic-beta 时,是否按需自动补齐(默认关闭以保持兼容) - InjectBetaForApiKey bool `mapstructure:"inject_beta_for_apikey"` - - // 是否允许对部分 400 错误触发 failover(默认关闭以避免改变语义) - FailoverOn400 bool `mapstructure:"failover_on_400"` - - // Scheduling: 账号调度相关配置 - Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"` -} - -// GatewaySchedulingConfig accounts scheduling configuration. -type GatewaySchedulingConfig struct { - // 粘性会话排队配置 - StickySessionMaxWaiting int `mapstructure:"sticky_session_max_waiting"` - StickySessionWaitTimeout time.Duration `mapstructure:"sticky_session_wait_timeout"` - - // 兜底排队配置 - FallbackWaitTimeout time.Duration `mapstructure:"fallback_wait_timeout"` - FallbackMaxWaiting int `mapstructure:"fallback_max_waiting"` - - // 负载计算 - LoadBatchEnabled bool `mapstructure:"load_batch_enabled"` - - // 过期槽位清理周期(0 表示禁用) - SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"` } func (s *ServerConfig) Address() string { @@ -345,10 +313,6 @@ func setDefaults() { // Gateway viper.SetDefault("gateway.response_header_timeout", 300) // 300秒(5分钟)等待上游响应头,LLM高负载时可能排队较久 - viper.SetDefault("gateway.log_upstream_error_body", false) - viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048) - viper.SetDefault("gateway.inject_beta_for_apikey", false) - viper.SetDefault("gateway.failover_on_400", false) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) @@ -359,12 +323,6 @@ func setDefaults() { viper.SetDefault("gateway.max_upstream_clients", 5000) viper.SetDefault("gateway.client_idle_ttl_seconds", 900) viper.SetDefault("gateway.concurrency_slot_ttl_minutes", 15) // 并发槽位过期时间(支持超长请求) - viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3) - viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) - viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) - viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100) - viper.SetDefault("gateway.scheduling.load_batch_enabled", true) - viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second) // TokenRefresh viper.SetDefault("token_refresh.enabled", true) @@ -453,21 +411,6 @@ func (c *Config) Validate() error { if c.Gateway.ConcurrencySlotTTLMinutes <= 0 { return fmt.Errorf("gateway.concurrency_slot_ttl_minutes must be positive") } - if c.Gateway.Scheduling.StickySessionMaxWaiting <= 0 { - return fmt.Errorf("gateway.scheduling.sticky_session_max_waiting must be positive") - } - if c.Gateway.Scheduling.StickySessionWaitTimeout <= 0 { - return fmt.Errorf("gateway.scheduling.sticky_session_wait_timeout must be positive") - } - if c.Gateway.Scheduling.FallbackWaitTimeout <= 0 { - return fmt.Errorf("gateway.scheduling.fallback_wait_timeout must be positive") - } - if c.Gateway.Scheduling.FallbackMaxWaiting <= 0 { - return fmt.Errorf("gateway.scheduling.fallback_max_waiting must be positive") - } - if c.Gateway.Scheduling.SlotCleanupInterval < 0 { - return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative") - } return nil } diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index 6e722a54..1f1becb8 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -1,11 +1,6 @@ package config -import ( - "testing" - "time" - - "github.com/spf13/viper" -) +import "testing" func TestNormalizeRunMode(t *testing.T) { tests := []struct { @@ -26,45 +21,3 @@ func TestNormalizeRunMode(t *testing.T) { } } } - -func TestLoadDefaultSchedulingConfig(t *testing.T) { - viper.Reset() - - cfg, err := Load() - if err != nil { - t.Fatalf("Load() error: %v", err) - } - - if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 { - t.Fatalf("StickySessionMaxWaiting = %d, want 3", cfg.Gateway.Scheduling.StickySessionMaxWaiting) - } - if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 45*time.Second { - t.Fatalf("StickySessionWaitTimeout = %v, want 45s", cfg.Gateway.Scheduling.StickySessionWaitTimeout) - } - if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second { - t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout) - } - if cfg.Gateway.Scheduling.FallbackMaxWaiting != 100 { - t.Fatalf("FallbackMaxWaiting = %d, want 100", cfg.Gateway.Scheduling.FallbackMaxWaiting) - } - if !cfg.Gateway.Scheduling.LoadBatchEnabled { - t.Fatalf("LoadBatchEnabled = false, want true") - } - if cfg.Gateway.Scheduling.SlotCleanupInterval != 30*time.Second { - t.Fatalf("SlotCleanupInterval = %v, want 30s", cfg.Gateway.Scheduling.SlotCleanupInterval) - } -} - -func TestLoadSchedulingConfigFromEnv(t *testing.T) { - viper.Reset() - t.Setenv("GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING", "5") - - cfg, err := Load() - if err != nil { - t.Fatalf("Load() error: %v", err) - } - - if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 5 { - t.Fatalf("StickySessionMaxWaiting = %d, want 5", cfg.Gateway.Scheduling.StickySessionMaxWaiting) - } -} diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 70b42ffe..a2f833ff 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -141,10 +141,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } else if apiKey.Group != nil { platform = apiKey.Group.Platform } - sessionKey := sessionHash - if platform == service.PlatformGemini && sessionHash != "" { - sessionKey = "gemini:" + sessionHash - } if platform == service.PlatformGemini { const maxAccountSwitches = 3 @@ -153,7 +149,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { lastFailoverStatus := 0 for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) + account, err := h.geminiCompatService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -162,13 +158,9 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } - account := selection.Account // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } if reqStream { sendMockWarmupStream(c, reqModel) } else { @@ -178,46 +170,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } // 3. 获取账号并发槽位 - accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() - if !selection.Acquired { - if selection.WaitPlan == nil { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) - return - } - canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) - if err != nil { - log.Printf("Increment account wait count failed: %v", err) - } else if !canWait { - log.Printf("Account wait queue full: account=%d", account.ID) - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) - return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - } - } - - accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - reqStream, - &streamStarted, - ) - if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { - log.Printf("Bind sticky session failed: %v", err) - } + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return } // 转发请求 - 根据账号平台分流 @@ -230,9 +187,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { @@ -277,7 +231,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { for { // 选择支持该模型的账号 - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) + account, err := h.gatewayService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -286,13 +240,9 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } - account := selection.Account // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { - if selection.Acquired && selection.ReleaseFunc != nil { - selection.ReleaseFunc() - } if reqStream { sendMockWarmupStream(c, reqModel) } else { @@ -302,46 +252,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } // 3. 获取账号并发槽位 - accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() - if !selection.Acquired { - if selection.WaitPlan == nil { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) - return - } - canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) - if err != nil { - log.Printf("Increment account wait count failed: %v", err) - } else if !canWait { - log.Printf("Account wait queue full: account=%d", account.ID) - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) - return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - } - } - - accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - reqStream, - &streamStarted, - ) - if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { - log.Printf("Bind sticky session failed: %v", err) - } + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return } // 转发请求 - 根据账号平台分流 @@ -354,9 +269,6 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/handler/gateway_helper.go b/backend/internal/handler/gateway_helper.go index 4e049dbb..4c7bd0f0 100644 --- a/backend/internal/handler/gateway_helper.go +++ b/backend/internal/handler/gateway_helper.go @@ -83,16 +83,6 @@ func (h *ConcurrencyHelper) DecrementWaitCount(ctx context.Context, userID int64 h.concurrencyService.DecrementWaitCount(ctx, userID) } -// IncrementAccountWaitCount increments the wait count for an account -func (h *ConcurrencyHelper) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { - return h.concurrencyService.IncrementAccountWaitCount(ctx, accountID, maxWait) -} - -// DecrementAccountWaitCount decrements the wait count for an account -func (h *ConcurrencyHelper) DecrementAccountWaitCount(ctx context.Context, accountID int64) { - h.concurrencyService.DecrementAccountWaitCount(ctx, accountID) -} - // AcquireUserSlotWithWait acquires a user concurrency slot, waiting if necessary. // For streaming requests, sends ping events during the wait. // streamStarted is updated if streaming response has begun. @@ -136,12 +126,7 @@ func (h *ConcurrencyHelper) AcquireAccountSlotWithWait(c *gin.Context, accountID // waitForSlotWithPing waits for a concurrency slot, sending ping events for streaming requests. // streamStarted pointer is updated when streaming begins (for proper error handling by caller). func (h *ConcurrencyHelper) waitForSlotWithPing(c *gin.Context, slotType string, id int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) { - return h.waitForSlotWithPingTimeout(c, slotType, id, maxConcurrency, maxConcurrencyWait, isStream, streamStarted) -} - -// waitForSlotWithPingTimeout waits for a concurrency slot with a custom timeout. -func (h *ConcurrencyHelper) waitForSlotWithPingTimeout(c *gin.Context, slotType string, id int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) { - ctx, cancel := context.WithTimeout(c.Request.Context(), timeout) + ctx, cancel := context.WithTimeout(c.Request.Context(), maxConcurrencyWait) defer cancel() // Determine if ping is needed (streaming + ping format defined) @@ -215,11 +200,6 @@ func (h *ConcurrencyHelper) waitForSlotWithPingTimeout(c *gin.Context, slotType } } -// AcquireAccountSlotWithWaitTimeout acquires an account slot with a custom timeout (keeps SSE ping). -func (h *ConcurrencyHelper) AcquireAccountSlotWithWaitTimeout(c *gin.Context, accountID int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) { - return h.waitForSlotWithPingTimeout(c, "account", accountID, maxConcurrency, timeout, isStream, streamStarted) -} - // nextBackoff 计算下一次退避时间 // 性能优化:使用指数退避 + 随机抖动,避免惊群效应 // current: 当前退避时间 diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 93ab23c9..4e99e00d 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -197,17 +197,13 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 3) select account (sticky session based on request body) parsedReq, _ := service.ParseGatewayRequest(body) sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) - sessionKey := sessionHash - if sessionHash != "" { - sessionKey = "gemini:" + sessionHash - } const maxAccountSwitches = 3 switchCount := 0 failedAccountIDs := make(map[int64]struct{}) lastFailoverStatus := 0 for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, failedAccountIDs) + account, err := h.geminiCompatService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, modelName, failedAccountIDs) if err != nil { if len(failedAccountIDs) == 0 { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) @@ -216,48 +212,12 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { handleGeminiFailoverExhausted(c, lastFailoverStatus) return } - account := selection.Account // 4) account concurrency slot - accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() - if !selection.Acquired { - if selection.WaitPlan == nil { - googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts") - return - } - canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) - if err != nil { - log.Printf("Increment account wait count failed: %v", err) - } else if !canWait { - log.Printf("Account wait queue full: account=%d", account.ID) - googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") - return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { - geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) - } - } - - accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - stream, - &streamStarted, - ) - if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } - googleError(c, http.StatusTooManyRequests, err.Error()) - return - } - if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionKey, account.ID); err != nil { - log.Printf("Bind sticky session failed: %v", err) - } + accountReleaseFunc, err := geminiConcurrency.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, stream, &streamStarted) + if err != nil { + googleError(c, http.StatusTooManyRequests, err.Error()) + return } // 5) forward (根据平台分流) @@ -270,9 +230,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 9931052d..7c9934c6 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -146,7 +146,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { for { // Select account supporting the requested model log.Printf("[OpenAI Handler] Selecting account: groupID=%v model=%s", apiKey.GroupID, reqModel) - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + account, err := h.gatewayService.SelectAccountForModelWithExclusions(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) if err != nil { log.Printf("[OpenAI Handler] SelectAccount failed: %v", err) if len(failedAccountIDs) == 0 { @@ -156,50 +156,14 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } - account := selection.Account log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name) // 3. Acquire account concurrency slot - accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() - if !selection.Acquired { - if selection.WaitPlan == nil { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) - return - } - canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) - if err != nil { - log.Printf("Increment account wait count failed: %v", err) - } else if !canWait { - log.Printf("Account wait queue full: account=%d", account.ID) - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) - return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - } - } - - accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - reqStream, - &streamStarted, - ) - if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } - log.Printf("Account concurrency acquire failed: %v", err) - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - if err := h.gatewayService.BindStickySession(c.Request.Context(), sessionHash, account.ID); err != nil { - log.Printf("Bind sticky session failed: %v", err) - } + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account.ID, account.Concurrency, reqStream, &streamStarted) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return } // Forward request @@ -207,9 +171,6 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index 34e6b1f4..01b805cd 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -54,9 +54,6 @@ type CustomToolSpec struct { InputSchema map[string]any `json:"input_schema"` } -// ClaudeCustomToolSpec 兼容旧命名(MCP custom 工具规格) -type ClaudeCustomToolSpec = CustomToolSpec - // SystemBlock system prompt 数组形式的元素 type SystemBlock struct { Type string `json:"type"` diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index 83b87a32..e0b5b886 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -14,16 +14,13 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st // 用于存储 tool_use id -> name 映射 toolIDToName := make(map[string]string) + // 检测是否启用 thinking + isThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" + // 只有 Gemini 模型支持 dummy thought workaround // Claude 模型通过 Vertex/Google API 需要有效的 thought signatures allowDummyThought := strings.HasPrefix(mappedModel, "gemini-") - // 检测是否启用 thinking - requestedThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" - // 为避免 Claude 模型的 thought signature/消息块约束导致 400(上游要求 thinking 块开头等), - // 非 Gemini 模型默认不启用 thinking(除非未来支持完整签名链路)。 - isThinkingEnabled := requestedThinkingEnabled && allowDummyThought - // 1. 构建 contents contents, err := buildContents(claudeReq.Messages, toolIDToName, isThinkingEnabled, allowDummyThought) if err != nil { @@ -34,15 +31,7 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model) // 3. 构建 generationConfig - reqForGen := claudeReq - if requestedThinkingEnabled && !allowDummyThought { - log.Printf("[Warning] Disabling thinking for non-Gemini model in antigravity transform: model=%s", mappedModel) - // shallow copy to avoid mutating caller's request - clone := *claudeReq - clone.Thinking = nil - reqForGen = &clone - } - generationConfig := buildGenerationConfig(reqForGen) + generationConfig := buildGenerationConfig(claudeReq) // 4. 构建 tools tools := buildTools(claudeReq.Tools) @@ -159,9 +148,8 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT if !hasThoughtPart && len(parts) > 0 { // 在开头添加 dummy thinking block parts = append([]GeminiPart{{ - Text: "Thinking...", - Thought: true, - ThoughtSignature: dummyThoughtSignature, + Text: "Thinking...", + Thought: true, }}, parts...) } } @@ -183,34 +171,6 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT // 参考: https://ai.google.dev/gemini-api/docs/thought-signatures const dummyThoughtSignature = "skip_thought_signature_validator" -// isValidThoughtSignature 验证 thought signature 是否有效 -// Claude API 要求 signature 必须是 base64 编码的字符串,长度至少 32 字节 -func isValidThoughtSignature(signature string) bool { - // 空字符串无效 - if signature == "" { - return false - } - - // signature 应该是 base64 编码,长度至少 40 个字符(约 30 字节) - // 参考 Claude API 文档和实际观察到的有效 signature - if len(signature) < 40 { - log.Printf("[Debug] Signature too short: len=%d", len(signature)) - return false - } - - // 检查是否是有效的 base64 字符 - // base64 字符集: A-Z, a-z, 0-9, +, /, = - for i, c := range signature { - if (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && - (c < '0' || c > '9') && c != '+' && c != '/' && c != '=' { - log.Printf("[Debug] Invalid base64 character at position %d: %c (code=%d)", i, c, c) - return false - } - } - - return true -} - // buildParts 构建消息的 parts // allowDummyThought: 只有 Gemini 模型支持 dummy thought signature func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDummyThought bool) ([]GeminiPart, error) { @@ -239,30 +199,22 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu } case "thinking": - if allowDummyThought { - // Gemini 模型可以使用 dummy signature - parts = append(parts, GeminiPart{ - Text: block.Thinking, - Thought: true, - ThoughtSignature: dummyThoughtSignature, - }) + part := GeminiPart{ + Text: block.Thinking, + Thought: true, + } + // 保留原有 signature(Claude 模型需要有效的 signature) + if block.Signature != "" { + part.ThoughtSignature = block.Signature + } else if !allowDummyThought { + // Claude 模型需要有效 signature,跳过无 signature 的 thinking block + log.Printf("Warning: skipping thinking block without signature for Claude model") continue + } else { + // Gemini 模型使用 dummy signature + part.ThoughtSignature = dummyThoughtSignature } - - // Claude 模型:仅在提供有效 signature 时保留 thinking block;否则跳过以避免上游校验失败。 - signature := strings.TrimSpace(block.Signature) - if signature == "" || signature == dummyThoughtSignature { - log.Printf("[Warning] Skipping thinking block for Claude model (missing or dummy signature)") - continue - } - if !isValidThoughtSignature(signature) { - log.Printf("[Debug] Thinking signature may be invalid (passing through anyway): len=%d", len(signature)) - } - parts = append(parts, GeminiPart{ - Text: block.Thinking, - Thought: true, - ThoughtSignature: signature, - }) + parts = append(parts, part) case "image": if block.Source != nil && block.Source.Type == "base64" { @@ -287,9 +239,10 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu ID: block.ID, }, } - // 只有 Gemini 模型使用 dummy signature - // Claude 模型不设置 signature(避免验证问题) - if allowDummyThought { + // 保留原有 signature,或对 Gemini 模型使用 dummy signature + if block.Signature != "" { + part.ThoughtSignature = block.Signature + } else if allowDummyThought { part.ThoughtSignature = dummyThoughtSignature } parts = append(parts, part) @@ -433,9 +386,9 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 普通工具 var funcDecls []GeminiFunctionDecl - for i, tool := range tools { + for _, tool := range tools { // 跳过无效工具名称 - if strings.TrimSpace(tool.Name) == "" { + if tool.Name == "" { log.Printf("Warning: skipping tool with empty name") continue } @@ -444,18 +397,10 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { var inputSchema map[string]any // 检查是否为 custom 类型工具 (MCP) - if tool.Type == "custom" { - if tool.Custom == nil || tool.Custom.InputSchema == nil { - log.Printf("[Warning] Skipping invalid custom tool '%s': missing custom spec or input_schema", tool.Name) - continue - } + if tool.Type == "custom" && tool.Custom != nil { + // Custom 格式: 从 custom 字段获取 description 和 input_schema description = tool.Custom.Description inputSchema = tool.Custom.InputSchema - - // 调试日志:记录 custom 工具的 schema - if schemaJSON, err := json.Marshal(inputSchema); err == nil { - log.Printf("[Debug] Tool[%d] '%s' (custom) original schema: %s", i, tool.Name, string(schemaJSON)) - } } else { // 标准格式: 从顶层字段获取 description = tool.Description @@ -464,6 +409,7 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 清理 JSON Schema params := cleanJSONSchema(inputSchema) + // 为 nil schema 提供默认值 if params == nil { params = map[string]any{ @@ -472,11 +418,6 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { } } - // 调试日志:记录清理后的 schema - if paramsJSON, err := json.Marshal(params); err == nil { - log.Printf("[Debug] Tool[%d] '%s' cleaned schema: %s", i, tool.Name, string(paramsJSON)) - } - funcDecls = append(funcDecls, GeminiFunctionDecl{ Name: tool.Name, Description: description, @@ -538,64 +479,31 @@ func cleanJSONSchema(schema map[string]any) map[string]any { } // excludedSchemaKeys 不支持的 schema 字段 -// 基于 Claude API (Vertex AI) 的实际支持情况 -// 支持: type, description, enum, properties, required, additionalProperties, items -// 不支持: minItems, maxItems, minLength, maxLength, pattern, minimum, maximum 等验证字段 var excludedSchemaKeys = map[string]bool{ - // 元 schema 字段 - "$schema": true, - "$id": true, - "$ref": true, - - // 字符串验证(Gemini 不支持) - "minLength": true, - "maxLength": true, - "pattern": true, - - // 数字验证(Claude API 通过 Vertex AI 不支持这些字段) - "minimum": true, - "maximum": true, - "exclusiveMinimum": true, - "exclusiveMaximum": true, - "multipleOf": true, - - // 数组验证(Claude API 通过 Vertex AI 不支持这些字段) - "uniqueItems": true, - "minItems": true, - "maxItems": true, - - // 组合 schema(Gemini 不支持) - "oneOf": true, - "anyOf": true, - "allOf": true, - "not": true, - "if": true, - "then": true, - "else": true, - "$defs": true, - "definitions": true, - - // 对象验证(仅保留 properties/required/additionalProperties) - "minProperties": true, - "maxProperties": true, - "patternProperties": true, - "propertyNames": true, - "dependencies": true, - "dependentSchemas": true, - "dependentRequired": true, - - // 其他不支持的字段 - "default": true, - "const": true, - "examples": true, - "deprecated": true, - "readOnly": true, - "writeOnly": true, - "contentMediaType": true, - "contentEncoding": true, - - // Claude 特有字段 - "strict": true, + "$schema": true, + "$id": true, + "$ref": true, + "additionalProperties": true, + "minLength": true, + "maxLength": true, + "minItems": true, + "maxItems": true, + "uniqueItems": true, + "minimum": true, + "maximum": true, + "exclusiveMinimum": true, + "exclusiveMaximum": true, + "pattern": true, + "format": true, + "default": true, + "strict": true, + "const": true, + "examples": true, + "deprecated": true, + "readOnly": true, + "writeOnly": true, + "contentMediaType": true, + "contentEncoding": true, } // cleanSchemaValue 递归清理 schema 值 @@ -615,31 +523,6 @@ func cleanSchemaValue(value any) any { continue } - // 特殊处理 format 字段:只保留 Gemini 支持的 format 值 - if k == "format" { - if formatStr, ok := val.(string); ok { - // Gemini 只支持 date-time, date, time - if formatStr == "date-time" || formatStr == "date" || formatStr == "time" { - result[k] = val - } - // 其他 format 值直接跳过 - } - continue - } - - // 特殊处理 additionalProperties:Claude API 只支持布尔值,不支持 schema 对象 - if k == "additionalProperties" { - if boolVal, ok := val.(bool); ok { - result[k] = boolVal - log.Printf("[Debug] additionalProperties is bool: %v", boolVal) - } else { - // 如果是 schema 对象,转换为 false(更安全的默认值) - result[k] = false - log.Printf("[Debug] additionalProperties is not bool (type: %T), converting to false", val) - } - continue - } - // 递归清理所有值 result[k] = cleanSchemaValue(val) } diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go deleted file mode 100644 index 56eebad0..00000000 --- a/backend/internal/pkg/antigravity/request_transformer_test.go +++ /dev/null @@ -1,179 +0,0 @@ -package antigravity - -import ( - "encoding/json" - "testing" -) - -// TestBuildParts_ThinkingBlockWithoutSignature 测试thinking block无signature时的处理 -func TestBuildParts_ThinkingBlockWithoutSignature(t *testing.T) { - tests := []struct { - name string - content string - allowDummyThought bool - expectedParts int - description string - }{ - { - name: "Claude model - skip thinking block without signature", - content: `[ - {"type": "text", "text": "Hello"}, - {"type": "thinking", "thinking": "Let me think...", "signature": ""}, - {"type": "text", "text": "World"} - ]`, - allowDummyThought: false, - expectedParts: 2, // 只有两个text block - description: "Claude模型应该跳过无signature的thinking block", - }, - { - name: "Claude model - keep thinking block with signature", - content: `[ - {"type": "text", "text": "Hello"}, - {"type": "thinking", "thinking": "Let me think...", "signature": "valid_sig"}, - {"type": "text", "text": "World"} - ]`, - allowDummyThought: false, - expectedParts: 3, // 三个block都保留 - description: "Claude模型应该保留有signature的thinking block", - }, - { - name: "Gemini model - use dummy signature", - content: `[ - {"type": "text", "text": "Hello"}, - {"type": "thinking", "thinking": "Let me think...", "signature": ""}, - {"type": "text", "text": "World"} - ]`, - allowDummyThought: true, - expectedParts: 3, // 三个block都保留,thinking使用dummy signature - description: "Gemini模型应该为无signature的thinking block使用dummy signature", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - toolIDToName := make(map[string]string) - parts, err := buildParts(json.RawMessage(tt.content), toolIDToName, tt.allowDummyThought) - - if err != nil { - t.Fatalf("buildParts() error = %v", err) - } - - if len(parts) != tt.expectedParts { - t.Errorf("%s: got %d parts, want %d parts", tt.description, len(parts), tt.expectedParts) - } - }) - } -} - -// TestBuildTools_CustomTypeTools 测试custom类型工具转换 -func TestBuildTools_CustomTypeTools(t *testing.T) { - tests := []struct { - name string - tools []ClaudeTool - expectedLen int - description string - }{ - { - name: "Standard tool format", - tools: []ClaudeTool{ - { - Name: "get_weather", - Description: "Get weather information", - InputSchema: map[string]any{ - "type": "object", - "properties": map[string]any{ - "location": map[string]any{"type": "string"}, - }, - }, - }, - }, - expectedLen: 1, - description: "标准工具格式应该正常转换", - }, - { - name: "Custom type tool (MCP format)", - tools: []ClaudeTool{ - { - Type: "custom", - Name: "mcp_tool", - Custom: &ClaudeCustomToolSpec{ - Description: "MCP tool description", - InputSchema: map[string]any{ - "type": "object", - "properties": map[string]any{ - "param": map[string]any{"type": "string"}, - }, - }, - }, - }, - }, - expectedLen: 1, - description: "Custom类型工具应该从Custom字段读取description和input_schema", - }, - { - name: "Mixed standard and custom tools", - tools: []ClaudeTool{ - { - Name: "standard_tool", - Description: "Standard tool", - InputSchema: map[string]any{"type": "object"}, - }, - { - Type: "custom", - Name: "custom_tool", - Custom: &ClaudeCustomToolSpec{ - Description: "Custom tool", - InputSchema: map[string]any{"type": "object"}, - }, - }, - }, - expectedLen: 1, // 返回一个GeminiToolDeclaration,包含2个function declarations - description: "混合标准和custom工具应该都能正确转换", - }, - { - name: "Invalid custom tool - nil Custom field", - tools: []ClaudeTool{ - { - Type: "custom", - Name: "invalid_custom", - // Custom 为 nil - }, - }, - expectedLen: 0, // 应该被跳过 - description: "Custom字段为nil的custom工具应该被跳过", - }, - { - name: "Invalid custom tool - nil InputSchema", - tools: []ClaudeTool{ - { - Type: "custom", - Name: "invalid_custom", - Custom: &ClaudeCustomToolSpec{ - Description: "Invalid", - // InputSchema 为 nil - }, - }, - }, - expectedLen: 0, // 应该被跳过 - description: "InputSchema为nil的custom工具应该被跳过", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := buildTools(tt.tools) - - if len(result) != tt.expectedLen { - t.Errorf("%s: got %d tool declarations, want %d", tt.description, len(result), tt.expectedLen) - } - - // 验证function declarations存在 - if len(result) > 0 && result[0].FunctionDeclarations != nil { - if len(result[0].FunctionDeclarations) != len(tt.tools) { - t.Errorf("%s: got %d function declarations, want %d", - tt.description, len(result[0].FunctionDeclarations), len(tt.tools)) - } - } - }) - } -} diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 0db3ed4a..97ad6c83 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -16,12 +16,6 @@ const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleav // HaikuBetaHeader Haiku 模型使用的 anthropic-beta header(不需要 claude-code beta) const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking -// ApiKeyBetaHeader API-key 账号建议使用的 anthropic-beta header(不包含 oauth) -const ApiKeyBetaHeader = BetaClaudeCode + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming - -// ApiKeyHaikuBetaHeader Haiku 模型在 API-key 账号下使用的 anthropic-beta header(不包含 oauth / claude-code) -const ApiKeyHaikuBetaHeader = BetaInterleavedThinking - // Claude Code 客户端默认请求头 var DefaultHeaders = map[string]string{ "User-Agent": "claude-cli/2.0.62 (external, cli)", diff --git a/backend/internal/repository/concurrency_cache.go b/backend/internal/repository/concurrency_cache.go index 35296497..9205230b 100644 --- a/backend/internal/repository/concurrency_cache.go +++ b/backend/internal/repository/concurrency_cache.go @@ -2,9 +2,7 @@ package repository import ( "context" - "errors" "fmt" - "strconv" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/redis/go-redis/v9" @@ -29,8 +27,6 @@ const ( userSlotKeyPrefix = "concurrency:user:" // 等待队列计数器格式: concurrency:wait:{userID} waitQueueKeyPrefix = "concurrency:wait:" - // 账号级等待队列计数器格式: wait:account:{accountID} - accountWaitKeyPrefix = "wait:account:" // 默认槽位过期时间(分钟),可通过配置覆盖 defaultSlotTTLMinutes = 15 @@ -116,112 +112,33 @@ var ( redis.call('EXPIRE', KEYS[1], ARGV[2]) end - return 1 - `) - - // incrementAccountWaitScript - account-level wait queue count - incrementAccountWaitScript = redis.NewScript(` - local current = redis.call('GET', KEYS[1]) - if current == false then - current = 0 - else - current = tonumber(current) - end - - if current >= tonumber(ARGV[1]) then - return 0 - end - - local newVal = redis.call('INCR', KEYS[1]) - - -- Only set TTL on first creation to avoid refreshing zombie data - if newVal == 1 then - redis.call('EXPIRE', KEYS[1], ARGV[2]) - end - - return 1 - `) + return 1 + `) // decrementWaitScript - same as before decrementWaitScript = redis.NewScript(` - local current = redis.call('GET', KEYS[1]) - if current ~= false and tonumber(current) > 0 then - redis.call('DECR', KEYS[1]) - end - return 1 - `) - - // getAccountsLoadBatchScript - batch load query (read-only) - // ARGV[1] = slot TTL (seconds, retained for compatibility) - // ARGV[2..n] = accountID1, maxConcurrency1, accountID2, maxConcurrency2, ... - getAccountsLoadBatchScript = redis.NewScript(` - local result = {} - - local i = 2 - while i <= #ARGV do - local accountID = ARGV[i] - local maxConcurrency = tonumber(ARGV[i + 1]) - - local slotKey = 'concurrency:account:' .. accountID - local currentConcurrency = redis.call('ZCARD', slotKey) - - local waitKey = 'wait:account:' .. accountID - local waitingCount = redis.call('GET', waitKey) - if waitingCount == false then - waitingCount = 0 - else - waitingCount = tonumber(waitingCount) - end - - local loadRate = 0 - if maxConcurrency > 0 then - loadRate = math.floor((currentConcurrency + waitingCount) * 100 / maxConcurrency) - end - - table.insert(result, accountID) - table.insert(result, currentConcurrency) - table.insert(result, waitingCount) - table.insert(result, loadRate) - - i = i + 2 - end - - return result - `) - - // cleanupExpiredSlotsScript - remove expired slots - // KEYS[1] = concurrency:account:{accountID} - // ARGV[1] = TTL (seconds) - cleanupExpiredSlotsScript = redis.NewScript(` - local key = KEYS[1] - local ttl = tonumber(ARGV[1]) - local timeResult = redis.call('TIME') - local now = tonumber(timeResult[1]) - local expireBefore = now - ttl - return redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore) - `) + local current = redis.call('GET', KEYS[1]) + if current ~= false and tonumber(current) > 0 then + redis.call('DECR', KEYS[1]) + end + return 1 + `) ) type concurrencyCache struct { - rdb *redis.Client - slotTTLSeconds int // 槽位过期时间(秒) - waitQueueTTLSeconds int // 等待队列过期时间(秒) + rdb *redis.Client + slotTTLSeconds int // 槽位过期时间(秒) } // NewConcurrencyCache 创建并发控制缓存 // slotTTLMinutes: 槽位过期时间(分钟),0 或负数使用默认值 15 分钟 -// waitQueueTTLSeconds: 等待队列过期时间(秒),0 或负数使用 slot TTL -func NewConcurrencyCache(rdb *redis.Client, slotTTLMinutes int, waitQueueTTLSeconds int) service.ConcurrencyCache { +func NewConcurrencyCache(rdb *redis.Client, slotTTLMinutes int) service.ConcurrencyCache { if slotTTLMinutes <= 0 { slotTTLMinutes = defaultSlotTTLMinutes } - if waitQueueTTLSeconds <= 0 { - waitQueueTTLSeconds = slotTTLMinutes * 60 - } return &concurrencyCache{ - rdb: rdb, - slotTTLSeconds: slotTTLMinutes * 60, - waitQueueTTLSeconds: waitQueueTTLSeconds, + rdb: rdb, + slotTTLSeconds: slotTTLMinutes * 60, } } @@ -238,10 +155,6 @@ func waitQueueKey(userID int64) string { return fmt.Sprintf("%s%d", waitQueueKeyPrefix, userID) } -func accountWaitKey(accountID int64) string { - return fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) -} - // Account slot operations func (c *concurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { @@ -312,75 +225,3 @@ func (c *concurrencyCache) DecrementWaitCount(ctx context.Context, userID int64) _, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result() return err } - -// Account wait queue operations - -func (c *concurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { - key := accountWaitKey(accountID) - result, err := incrementAccountWaitScript.Run(ctx, c.rdb, []string{key}, maxWait, c.waitQueueTTLSeconds).Int() - if err != nil { - return false, err - } - return result == 1, nil -} - -func (c *concurrencyCache) DecrementAccountWaitCount(ctx context.Context, accountID int64) error { - key := accountWaitKey(accountID) - _, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result() - return err -} - -func (c *concurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { - key := accountWaitKey(accountID) - val, err := c.rdb.Get(ctx, key).Int() - if err != nil && !errors.Is(err, redis.Nil) { - return 0, err - } - if errors.Is(err, redis.Nil) { - return 0, nil - } - return val, nil -} - -func (c *concurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []service.AccountWithConcurrency) (map[int64]*service.AccountLoadInfo, error) { - if len(accounts) == 0 { - return map[int64]*service.AccountLoadInfo{}, nil - } - - args := []any{c.slotTTLSeconds} - for _, acc := range accounts { - args = append(args, acc.ID, acc.MaxConcurrency) - } - - result, err := getAccountsLoadBatchScript.Run(ctx, c.rdb, []string{}, args...).Slice() - if err != nil { - return nil, err - } - - loadMap := make(map[int64]*service.AccountLoadInfo) - for i := 0; i < len(result); i += 4 { - if i+3 >= len(result) { - break - } - - accountID, _ := strconv.ParseInt(fmt.Sprintf("%v", result[i]), 10, 64) - currentConcurrency, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+1])) - waitingCount, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+2])) - loadRate, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+3])) - - loadMap[accountID] = &service.AccountLoadInfo{ - AccountID: accountID, - CurrentConcurrency: currentConcurrency, - WaitingCount: waitingCount, - LoadRate: loadRate, - } - } - - return loadMap, nil -} - -func (c *concurrencyCache) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { - key := accountSlotKey(accountID) - _, err := cleanupExpiredSlotsScript.Run(ctx, c.rdb, []string{key}, c.slotTTLSeconds).Result() - return err -} diff --git a/backend/internal/repository/concurrency_cache_benchmark_test.go b/backend/internal/repository/concurrency_cache_benchmark_test.go index 25697ab1..cafab9cb 100644 --- a/backend/internal/repository/concurrency_cache_benchmark_test.go +++ b/backend/internal/repository/concurrency_cache_benchmark_test.go @@ -22,7 +22,7 @@ func BenchmarkAccountConcurrency(b *testing.B) { _ = rdb.Close() }() - cache, _ := NewConcurrencyCache(rdb, benchSlotTTLMinutes, int(benchSlotTTL.Seconds())).(*concurrencyCache) + cache, _ := NewConcurrencyCache(rdb, benchSlotTTLMinutes).(*concurrencyCache) ctx := context.Background() for _, size := range []int{10, 100, 1000} { diff --git a/backend/internal/repository/concurrency_cache_integration_test.go b/backend/internal/repository/concurrency_cache_integration_test.go index 5983c832..6a7c83f4 100644 --- a/backend/internal/repository/concurrency_cache_integration_test.go +++ b/backend/internal/repository/concurrency_cache_integration_test.go @@ -27,7 +27,7 @@ type ConcurrencyCacheSuite struct { func (s *ConcurrencyCacheSuite) SetupTest() { s.IntegrationRedisSuite.SetupTest() - s.cache = NewConcurrencyCache(s.rdb, testSlotTTLMinutes, int(testSlotTTL.Seconds())) + s.cache = NewConcurrencyCache(s.rdb, testSlotTTLMinutes) } func (s *ConcurrencyCacheSuite) TestAccountSlot_AcquireAndRelease() { @@ -218,48 +218,6 @@ func (s *ConcurrencyCacheSuite) TestWaitQueue_DecrementNoNegative() { require.GreaterOrEqual(s.T(), val, 0, "expected non-negative wait count") } -func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_IncrementAndDecrement() { - accountID := int64(30) - waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) - - ok, err := s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) - require.NoError(s.T(), err, "IncrementAccountWaitCount 1") - require.True(s.T(), ok) - - ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) - require.NoError(s.T(), err, "IncrementAccountWaitCount 2") - require.True(s.T(), ok) - - ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) - require.NoError(s.T(), err, "IncrementAccountWaitCount 3") - require.False(s.T(), ok, "expected account wait increment over max to fail") - - ttl, err := s.rdb.TTL(s.ctx, waitKey).Result() - require.NoError(s.T(), err, "TTL account waitKey") - s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) - - require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount") - - val, err := s.rdb.Get(s.ctx, waitKey).Int() - if !errors.Is(err, redis.Nil) { - require.NoError(s.T(), err, "Get waitKey") - } - require.Equal(s.T(), 1, val, "expected account wait count 1") -} - -func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_DecrementNoNegative() { - accountID := int64(301) - waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) - - require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount on non-existent key") - - val, err := s.rdb.Get(s.ctx, waitKey).Int() - if !errors.Is(err, redis.Nil) { - require.NoError(s.T(), err, "Get waitKey") - } - require.GreaterOrEqual(s.T(), val, 0, "expected non-negative account wait count after decrement on empty") -} - func (s *ConcurrencyCacheSuite) TestGetAccountConcurrency_Missing() { // When no slots exist, GetAccountConcurrency should return 0 cur, err := s.cache.GetAccountConcurrency(s.ctx, 999) @@ -274,139 +232,6 @@ func (s *ConcurrencyCacheSuite) TestGetUserConcurrency_Missing() { require.Equal(s.T(), 0, cur) } -func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch() { - s.T().Skip("TODO: Fix this test - CurrentConcurrency returns 0 instead of expected value in CI") - // Setup: Create accounts with different load states - account1 := int64(100) - account2 := int64(101) - account3 := int64(102) - - // Account 1: 2/3 slots used, 1 waiting - ok, err := s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req1") - require.NoError(s.T(), err) - require.True(s.T(), ok) - ok, err = s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req2") - require.NoError(s.T(), err) - require.True(s.T(), ok) - ok, err = s.cache.IncrementAccountWaitCount(s.ctx, account1, 5) - require.NoError(s.T(), err) - require.True(s.T(), ok) - - // Account 2: 1/2 slots used, 0 waiting - ok, err = s.cache.AcquireAccountSlot(s.ctx, account2, 2, "req3") - require.NoError(s.T(), err) - require.True(s.T(), ok) - - // Account 3: 0/1 slots used, 0 waiting (idle) - - // Query batch load - accounts := []service.AccountWithConcurrency{ - {ID: account1, MaxConcurrency: 3}, - {ID: account2, MaxConcurrency: 2}, - {ID: account3, MaxConcurrency: 1}, - } - - loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, accounts) - require.NoError(s.T(), err) - require.Len(s.T(), loadMap, 3) - - // Verify account1: (2 + 1) / 3 = 100% - load1 := loadMap[account1] - require.NotNil(s.T(), load1) - require.Equal(s.T(), account1, load1.AccountID) - require.Equal(s.T(), 2, load1.CurrentConcurrency) - require.Equal(s.T(), 1, load1.WaitingCount) - require.Equal(s.T(), 100, load1.LoadRate) - - // Verify account2: (1 + 0) / 2 = 50% - load2 := loadMap[account2] - require.NotNil(s.T(), load2) - require.Equal(s.T(), account2, load2.AccountID) - require.Equal(s.T(), 1, load2.CurrentConcurrency) - require.Equal(s.T(), 0, load2.WaitingCount) - require.Equal(s.T(), 50, load2.LoadRate) - - // Verify account3: (0 + 0) / 1 = 0% - load3 := loadMap[account3] - require.NotNil(s.T(), load3) - require.Equal(s.T(), account3, load3.AccountID) - require.Equal(s.T(), 0, load3.CurrentConcurrency) - require.Equal(s.T(), 0, load3.WaitingCount) - require.Equal(s.T(), 0, load3.LoadRate) -} - -func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch_Empty() { - // Test with empty account list - loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, []service.AccountWithConcurrency{}) - require.NoError(s.T(), err) - require.Empty(s.T(), loadMap) -} - -func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots() { - accountID := int64(200) - slotKey := fmt.Sprintf("%s%d", accountSlotKeyPrefix, accountID) - - // Acquire 3 slots - ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") - require.NoError(s.T(), err) - require.True(s.T(), ok) - ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2") - require.NoError(s.T(), err) - require.True(s.T(), ok) - ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req3") - require.NoError(s.T(), err) - require.True(s.T(), ok) - - // Verify 3 slots exist - cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) - require.NoError(s.T(), err) - require.Equal(s.T(), 3, cur) - - // Manually set old timestamps for req1 and req2 (simulate expired slots) - now := time.Now().Unix() - expiredTime := now - int64(testSlotTTL.Seconds()) - 10 // 10 seconds past TTL - err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req1"}).Err() - require.NoError(s.T(), err) - err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req2"}).Err() - require.NoError(s.T(), err) - - // Run cleanup - err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID) - require.NoError(s.T(), err) - - // Verify only 1 slot remains (req3) - cur, err = s.cache.GetAccountConcurrency(s.ctx, accountID) - require.NoError(s.T(), err) - require.Equal(s.T(), 1, cur) - - // Verify req3 still exists - members, err := s.rdb.ZRange(s.ctx, slotKey, 0, -1).Result() - require.NoError(s.T(), err) - require.Len(s.T(), members, 1) - require.Equal(s.T(), "req3", members[0]) -} - -func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots_NoExpired() { - accountID := int64(201) - - // Acquire 2 fresh slots - ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") - require.NoError(s.T(), err) - require.True(s.T(), ok) - ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2") - require.NoError(s.T(), err) - require.True(s.T(), ok) - - // Run cleanup (should not remove anything) - err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID) - require.NoError(s.T(), err) - - // Verify both slots still exist - cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) - require.NoError(s.T(), err) - require.Equal(s.T(), 2, cur) -} - func TestConcurrencyCacheSuite(t *testing.T) { suite.Run(t, new(ConcurrencyCacheSuite)) } diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index 0d579b23..2de2d1de 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -15,14 +15,7 @@ import ( // ProvideConcurrencyCache 创建并发控制缓存,从配置读取 TTL 参数 // 性能优化:TTL 可配置,支持长时间运行的 LLM 请求场景 func ProvideConcurrencyCache(rdb *redis.Client, cfg *config.Config) service.ConcurrencyCache { - waitTTLSeconds := int(cfg.Gateway.Scheduling.StickySessionWaitTimeout.Seconds()) - if cfg.Gateway.Scheduling.FallbackWaitTimeout > cfg.Gateway.Scheduling.StickySessionWaitTimeout { - waitTTLSeconds = int(cfg.Gateway.Scheduling.FallbackWaitTimeout.Seconds()) - } - if waitTTLSeconds <= 0 { - waitTTLSeconds = cfg.Gateway.ConcurrencySlotTTLMinutes * 60 - } - return NewConcurrencyCache(rdb, cfg.Gateway.ConcurrencySlotTTLMinutes, waitTTLSeconds) + return NewConcurrencyCache(rdb, cfg.Gateway.ConcurrencySlotTTLMinutes) } // ProviderSet is the Wire provider set for all repositories diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 5b3bf565..ae2976f8 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -358,15 +358,6 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return nil, fmt.Errorf("transform request: %w", err) } - // 调试:记录转换后的请求体(仅记录前 2000 字符) - if bodyJSON, err := json.Marshal(geminiBody); err == nil { - truncated := string(bodyJSON) - if len(truncated) > 2000 { - truncated = truncated[:2000] + "..." - } - log.Printf("[Debug] Transformed Gemini request: %s", truncated) - } - // 构建上游 action action := "generateContent" if claudeReq.Stream { diff --git a/backend/internal/service/concurrency_service.go b/backend/internal/service/concurrency_service.go index 65ef16db..b5229491 100644 --- a/backend/internal/service/concurrency_service.go +++ b/backend/internal/service/concurrency_service.go @@ -18,11 +18,6 @@ type ConcurrencyCache interface { ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) - // 账号等待队列(账号级) - IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) - DecrementAccountWaitCount(ctx context.Context, accountID int64) error - GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) - // 用户槽位管理 // 键格式: concurrency:user:{userID}(有序集合,成员为 requestID) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) @@ -32,12 +27,6 @@ type ConcurrencyCache interface { // 等待队列计数(只在首次创建时设置 TTL) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) DecrementWaitCount(ctx context.Context, userID int64) error - - // 批量负载查询(只读) - GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) - - // 清理过期槽位(后台任务) - CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error } // generateRequestID generates a unique request ID for concurrency slot tracking @@ -72,18 +61,6 @@ type AcquireResult struct { ReleaseFunc func() // Must be called when done (typically via defer) } -type AccountWithConcurrency struct { - ID int64 - MaxConcurrency int -} - -type AccountLoadInfo struct { - AccountID int64 - CurrentConcurrency int - WaitingCount int - LoadRate int // 0-100+ (percent) -} - // AcquireAccountSlot attempts to acquire a concurrency slot for an account. // If the account is at max concurrency, it waits until a slot is available or timeout. // Returns a release function that MUST be called when the request completes. @@ -200,42 +177,6 @@ func (s *ConcurrencyService) DecrementWaitCount(ctx context.Context, userID int6 } } -// IncrementAccountWaitCount increments the wait queue counter for an account. -func (s *ConcurrencyService) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { - if s.cache == nil { - return true, nil - } - - result, err := s.cache.IncrementAccountWaitCount(ctx, accountID, maxWait) - if err != nil { - log.Printf("Warning: increment wait count failed for account %d: %v", accountID, err) - return true, nil - } - return result, nil -} - -// DecrementAccountWaitCount decrements the wait queue counter for an account. -func (s *ConcurrencyService) DecrementAccountWaitCount(ctx context.Context, accountID int64) { - if s.cache == nil { - return - } - - bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - if err := s.cache.DecrementAccountWaitCount(bgCtx, accountID); err != nil { - log.Printf("Warning: decrement wait count failed for account %d: %v", accountID, err) - } -} - -// GetAccountWaitingCount gets current wait queue count for an account. -func (s *ConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { - if s.cache == nil { - return 0, nil - } - return s.cache.GetAccountWaitingCount(ctx, accountID) -} - // CalculateMaxWait calculates the maximum wait queue size for a user // maxWait = userConcurrency + defaultExtraWaitSlots func CalculateMaxWait(userConcurrency int) int { @@ -245,57 +186,6 @@ func CalculateMaxWait(userConcurrency int) int { return userConcurrency + defaultExtraWaitSlots } -// GetAccountsLoadBatch returns load info for multiple accounts. -func (s *ConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { - if s.cache == nil { - return map[int64]*AccountLoadInfo{}, nil - } - return s.cache.GetAccountsLoadBatch(ctx, accounts) -} - -// CleanupExpiredAccountSlots removes expired slots for one account (background task). -func (s *ConcurrencyService) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { - if s.cache == nil { - return nil - } - return s.cache.CleanupExpiredAccountSlots(ctx, accountID) -} - -// StartSlotCleanupWorker starts a background cleanup worker for expired account slots. -func (s *ConcurrencyService) StartSlotCleanupWorker(accountRepo AccountRepository, interval time.Duration) { - if s == nil || s.cache == nil || accountRepo == nil || interval <= 0 { - return - } - - runCleanup := func() { - listCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - accounts, err := accountRepo.ListSchedulable(listCtx) - cancel() - if err != nil { - log.Printf("Warning: list schedulable accounts failed: %v", err) - return - } - for _, account := range accounts { - accountCtx, accountCancel := context.WithTimeout(context.Background(), 2*time.Second) - err := s.cache.CleanupExpiredAccountSlots(accountCtx, account.ID) - accountCancel() - if err != nil { - log.Printf("Warning: cleanup expired slots failed for account %d: %v", account.ID, err) - } - } - } - - go func() { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - runCleanup() - for range ticker.C { - runCleanup() - } - }() -} - // GetAccountConcurrencyBatch gets current concurrency counts for multiple accounts // Returns a map of accountID -> current concurrency count func (s *ConcurrencyService) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 560c7767..d779bcfa 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -261,34 +261,6 @@ func TestGatewayService_SelectAccountForModelWithPlatform_PriorityAndLastUsed(t require.Equal(t, int64(2), acc.ID, "同优先级应选择最久未用的账户") } -func TestGatewayService_SelectAccountForModelWithPlatform_GeminiOAuthPreference(t *testing.T) { - ctx := context.Background() - - repo := &mockAccountRepoForPlatform{ - accounts: []Account{ - {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeApiKey}, - {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, - }, - accountsByID: map[int64]*Account{}, - } - for i := range repo.accounts { - repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] - } - - cache := &mockGatewayCacheForPlatform{} - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: testConfig(), - } - - acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) - require.NoError(t, err) - require.NotNil(t, acc) - require.Equal(t, int64(2), acc.ID, "同优先级且未使用时应优先选择OAuth账户") -} - // TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts 测试无可用账户 func TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts(t *testing.T) { ctx := context.Background() @@ -604,32 +576,6 @@ func TestGatewayService_isModelSupportedByAccount(t *testing.T) { func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { ctx := context.Background() - t.Run("混合调度-Gemini优先选择OAuth账户", func(t *testing.T) { - repo := &mockAccountRepoForPlatform{ - accounts: []Account{ - {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeApiKey}, - {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, - }, - accountsByID: map[int64]*Account{}, - } - for i := range repo.accounts { - repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] - } - - cache := &mockGatewayCacheForPlatform{} - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: testConfig(), - } - - acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) - require.NoError(t, err) - require.NotNil(t, acc) - require.Equal(t, int64(2), acc.ID, "同优先级且未使用时应优先选择OAuth账户") - }) - t.Run("混合调度-包含启用mixed_scheduling的antigravity账户", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{ @@ -837,160 +783,3 @@ func TestAccount_IsMixedSchedulingEnabled(t *testing.T) { }) } } - -// mockConcurrencyService for testing -type mockConcurrencyService struct { - accountLoads map[int64]*AccountLoadInfo - accountWaitCounts map[int64]int - acquireResults map[int64]bool -} - -func (m *mockConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { - if m.accountLoads == nil { - return map[int64]*AccountLoadInfo{}, nil - } - result := make(map[int64]*AccountLoadInfo) - for _, acc := range accounts { - if load, ok := m.accountLoads[acc.ID]; ok { - result[acc.ID] = load - } else { - result[acc.ID] = &AccountLoadInfo{ - AccountID: acc.ID, - CurrentConcurrency: 0, - WaitingCount: 0, - LoadRate: 0, - } - } - } - return result, nil -} - -func (m *mockConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { - if m.accountWaitCounts == nil { - return 0, nil - } - return m.accountWaitCounts[accountID], nil -} - -// TestGatewayService_SelectAccountWithLoadAwareness tests load-aware account selection -func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { - ctx := context.Background() - - t.Run("禁用负载批量查询-降级到传统选择", func(t *testing.T) { - repo := &mockAccountRepoForPlatform{ - accounts: []Account{ - {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, - {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, - }, - accountsByID: map[int64]*Account{}, - } - for i := range repo.accounts { - repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] - } - - cache := &mockGatewayCacheForPlatform{} - - cfg := testConfig() - cfg.Gateway.Scheduling.LoadBatchEnabled = false - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: cfg, - concurrencyService: nil, // No concurrency service - } - - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) - require.NoError(t, err) - require.NotNil(t, result) - require.NotNil(t, result.Account) - require.Equal(t, int64(1), result.Account.ID, "应选择优先级最高的账号") - }) - - t.Run("无ConcurrencyService-降级到传统选择", func(t *testing.T) { - repo := &mockAccountRepoForPlatform{ - accounts: []Account{ - {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, - {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, - }, - accountsByID: map[int64]*Account{}, - } - for i := range repo.accounts { - repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] - } - - cache := &mockGatewayCacheForPlatform{} - - cfg := testConfig() - cfg.Gateway.Scheduling.LoadBatchEnabled = true - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: cfg, - concurrencyService: nil, - } - - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) - require.NoError(t, err) - require.NotNil(t, result) - require.NotNil(t, result.Account) - require.Equal(t, int64(2), result.Account.ID, "应选择优先级最高的账号") - }) - - t.Run("排除账号-不选择被排除的账号", func(t *testing.T) { - repo := &mockAccountRepoForPlatform{ - accounts: []Account{ - {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, - {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, - }, - accountsByID: map[int64]*Account{}, - } - for i := range repo.accounts { - repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] - } - - cache := &mockGatewayCacheForPlatform{} - - cfg := testConfig() - cfg.Gateway.Scheduling.LoadBatchEnabled = false - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: cfg, - concurrencyService: nil, - } - - excludedIDs := map[int64]struct{}{1: {}} - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs) - require.NoError(t, err) - require.NotNil(t, result) - require.NotNil(t, result.Account) - require.Equal(t, int64(2), result.Account.ID, "不应选择被排除的账号") - }) - - t.Run("无可用账号-返回错误", func(t *testing.T) { - repo := &mockAccountRepoForPlatform{ - accounts: []Account{}, - accountsByID: map[int64]*Account{}, - } - - cache := &mockGatewayCacheForPlatform{} - - cfg := testConfig() - cfg.Gateway.Scheduling.LoadBatchEnabled = false - - svc := &GatewayService{ - accountRepo: repo, - cache: cache, - cfg: cfg, - concurrencyService: nil, - } - - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) - require.Error(t, err) - require.Nil(t, result) - require.Contains(t, err.Error(), "no available accounts") - }) -} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index cb60131b..d542e9c2 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -13,14 +13,12 @@ import ( "log" "net/http" "regexp" - "sort" "strings" "time" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" - "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/gin-gonic/gin" @@ -68,20 +66,6 @@ type GatewayCache interface { RefreshSessionTTL(ctx context.Context, sessionHash string, ttl time.Duration) error } -type AccountWaitPlan struct { - AccountID int64 - MaxConcurrency int - Timeout time.Duration - MaxWaiting int -} - -type AccountSelectionResult struct { - Account *Account - Acquired bool - ReleaseFunc func() - WaitPlan *AccountWaitPlan // nil means no wait allowed -} - // ClaudeUsage 表示Claude API返回的usage信息 type ClaudeUsage struct { InputTokens int `json:"input_tokens"` @@ -124,7 +108,6 @@ type GatewayService struct { identityService *IdentityService httpUpstream HTTPUpstream deferredService *DeferredService - concurrencyService *ConcurrencyService } // NewGatewayService creates a new GatewayService @@ -136,7 +119,6 @@ func NewGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, - concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, billingCacheService *BillingCacheService, @@ -152,7 +134,6 @@ func NewGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, - concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, billingCacheService: billingCacheService, @@ -202,14 +183,6 @@ func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string { return "" } -// BindStickySession sets session -> account binding with standard TTL. -func (s *GatewayService) BindStickySession(ctx context.Context, sessionHash string, accountID int64) error { - if sessionHash == "" || accountID <= 0 { - return nil - } - return s.cache.SetSessionAccountID(ctx, sessionHash, accountID, stickySessionTTL) -} - func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string { if parsed == nil { return "" @@ -359,354 +332,8 @@ func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context return s.selectAccountForModelWithPlatform(ctx, groupID, sessionHash, requestedModel, excludedIDs, platform) } -// SelectAccountWithLoadAwareness selects account with load-awareness and wait plan. -func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { - cfg := s.schedulingConfig() - var stickyAccountID int64 - if sessionHash != "" && s.cache != nil { - if accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash); err == nil { - stickyAccountID = accountID - } - } - if s.concurrencyService == nil || !cfg.LoadBatchEnabled { - account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) - if err != nil { - return nil, err - } - result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) - if err == nil && result.Acquired { - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil - } - } - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.FallbackWaitTimeout, - MaxWaiting: cfg.FallbackMaxWaiting, - }, - }, nil - } - - platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID) - if err != nil { - return nil, err - } - preferOAuth := platform == PlatformGemini - - accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) - if err != nil { - return nil, err - } - if len(accounts) == 0 { - return nil, errors.New("no available accounts") - } - - isExcluded := func(accountID int64) bool { - if excludedIDs == nil { - return false - } - _, excluded := excludedIDs[accountID] - return excluded - } - - // ============ Layer 1: 粘性会话优先 ============ - if sessionHash != "" { - accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash) - if err == nil && accountID > 0 && !isExcluded(accountID) { - account, err := s.accountRepo.GetByID(ctx, accountID) - if err == nil && s.isAccountAllowedForPlatform(account, platform, useMixed) && - account.IsSchedulable() && - (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { - result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) - if err == nil && result.Acquired { - _ = s.cache.RefreshSessionTTL(ctx, sessionHash, stickySessionTTL) - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: accountID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil - } - } - } - } - - // ============ Layer 2: 负载感知选择 ============ - candidates := make([]*Account, 0, len(accounts)) - for i := range accounts { - acc := &accounts[i] - if isExcluded(acc.ID) { - continue - } - if !s.isAccountAllowedForPlatform(acc, platform, useMixed) { - continue - } - if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { - continue - } - candidates = append(candidates, acc) - } - - if len(candidates) == 0 { - return nil, errors.New("no available accounts") - } - - accountLoads := make([]AccountWithConcurrency, 0, len(candidates)) - for _, acc := range candidates { - accountLoads = append(accountLoads, AccountWithConcurrency{ - ID: acc.ID, - MaxConcurrency: acc.Concurrency, - }) - } - - loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) - if err != nil { - if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, sessionHash, preferOAuth); ok { - return result, nil - } - } else { - type accountWithLoad struct { - account *Account - loadInfo *AccountLoadInfo - } - var available []accountWithLoad - for _, acc := range candidates { - loadInfo := loadMap[acc.ID] - if loadInfo == nil { - loadInfo = &AccountLoadInfo{AccountID: acc.ID} - } - if loadInfo.LoadRate < 100 { - available = append(available, accountWithLoad{ - account: acc, - loadInfo: loadInfo, - }) - } - } - - if len(available) > 0 { - sort.SliceStable(available, func(i, j int) bool { - a, b := available[i], available[j] - if a.account.Priority != b.account.Priority { - return a.account.Priority < b.account.Priority - } - if a.loadInfo.LoadRate != b.loadInfo.LoadRate { - return a.loadInfo.LoadRate < b.loadInfo.LoadRate - } - switch { - case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil: - return true - case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil: - return false - case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil: - if preferOAuth && a.account.Type != b.account.Type { - return a.account.Type == AccountTypeOAuth - } - return false - default: - return a.account.LastUsedAt.Before(*b.account.LastUsedAt) - } - }) - - for _, item := range available { - result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) - if err == nil && result.Acquired { - if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, sessionHash, item.account.ID, stickySessionTTL) - } - return &AccountSelectionResult{ - Account: item.account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - } - } - } - - // ============ Layer 3: 兜底排队 ============ - sortAccountsByPriorityAndLastUsed(candidates, preferOAuth) - for _, acc := range candidates { - return &AccountSelectionResult{ - Account: acc, - WaitPlan: &AccountWaitPlan{ - AccountID: acc.ID, - MaxConcurrency: acc.Concurrency, - Timeout: cfg.FallbackWaitTimeout, - MaxWaiting: cfg.FallbackMaxWaiting, - }, - }, nil - } - return nil, errors.New("no available accounts") -} - -func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, sessionHash string, preferOAuth bool) (*AccountSelectionResult, bool) { - ordered := append([]*Account(nil), candidates...) - sortAccountsByPriorityAndLastUsed(ordered, preferOAuth) - - for _, acc := range ordered { - result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) - if err == nil && result.Acquired { - if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, sessionHash, acc.ID, stickySessionTTL) - } - return &AccountSelectionResult{ - Account: acc, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, true - } - } - - return nil, false -} - -func (s *GatewayService) schedulingConfig() config.GatewaySchedulingConfig { - if s.cfg != nil { - return s.cfg.Gateway.Scheduling - } - return config.GatewaySchedulingConfig{ - StickySessionMaxWaiting: 3, - StickySessionWaitTimeout: 45 * time.Second, - FallbackWaitTimeout: 30 * time.Second, - FallbackMaxWaiting: 100, - LoadBatchEnabled: true, - SlotCleanupInterval: 30 * time.Second, - } -} - -func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64) (string, bool, error) { - forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) - if hasForcePlatform && forcePlatform != "" { - return forcePlatform, true, nil - } - if groupID != nil { - group, err := s.groupRepo.GetByID(ctx, *groupID) - if err != nil { - return "", false, fmt.Errorf("get group failed: %w", err) - } - return group.Platform, false, nil - } - return PlatformAnthropic, false, nil -} - -func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { - useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform - if useMixed { - platforms := []string{platform, PlatformAntigravity} - var accounts []Account - var err error - if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, platforms) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) - } - if err != nil { - return nil, useMixed, err - } - filtered := make([]Account, 0, len(accounts)) - for _, acc := range accounts { - if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { - continue - } - filtered = append(filtered, acc) - } - return filtered, useMixed, nil - } - - var accounts []Account - var err error - if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) - } else if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, platform) - if err == nil && len(accounts) == 0 && hasForcePlatform { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) - } - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) - } - if err != nil { - return nil, useMixed, err - } - return accounts, useMixed, nil -} - -func (s *GatewayService) isAccountAllowedForPlatform(account *Account, platform string, useMixed bool) bool { - if account == nil { - return false - } - if useMixed { - if account.Platform == platform { - return true - } - return account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() - } - return account.Platform == platform -} - -func (s *GatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) { - if s.concurrencyService == nil { - return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil - } - return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) -} - -func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { - sort.SliceStable(accounts, func(i, j int) bool { - a, b := accounts[i], accounts[j] - if a.Priority != b.Priority { - return a.Priority < b.Priority - } - switch { - case a.LastUsedAt == nil && b.LastUsedAt != nil: - return true - case a.LastUsedAt != nil && b.LastUsedAt == nil: - return false - case a.LastUsedAt == nil && b.LastUsedAt == nil: - if preferOAuth && a.Type != b.Type { - return a.Type == AccountTypeOAuth - } - return false - default: - return a.LastUsedAt.Before(*b.LastUsedAt) - } - }) -} - // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { - preferOAuth := platform == PlatformGemini // 1. 查询粘性会话 if sessionHash != "" { accountID, err := s.cache.GetSessionAccountID(ctx, sessionHash) @@ -762,9 +389,7 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, case acc.LastUsedAt != nil && selected.LastUsedAt == nil: // keep selected (never used is preferred) case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - if preferOAuth && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { - selected = acc - } + // keep selected (both never used) default: if acc.LastUsedAt.Before(*selected.LastUsedAt) { selected = acc @@ -794,7 +419,6 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, // 查询原生平台账户 + 启用 mixed_scheduling 的 antigravity 账户 func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, nativePlatform string) (*Account, error) { platforms := []string{nativePlatform, PlatformAntigravity} - preferOAuth := nativePlatform == PlatformGemini // 1. 查询粘性会话 if sessionHash != "" { @@ -854,9 +478,7 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g case acc.LastUsedAt != nil && selected.LastUsedAt == nil: // keep selected (never used is preferred) case acc.LastUsedAt == nil && selected.LastUsedAt == nil: - if preferOAuth && acc.Platform == PlatformGemini && selected.Platform == PlatformGemini && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { - selected = acc - } + // keep selected (both never used) default: if acc.LastUsedAt.Before(*selected.LastUsedAt) { selected = acc @@ -1062,30 +684,6 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 处理错误响应(不可重试的错误) if resp.StatusCode >= 400 { - // 可选:对部分 400 触发 failover(默认关闭以保持语义) - if resp.StatusCode == 400 && s.cfg != nil && s.cfg.Gateway.FailoverOn400 { - respBody, readErr := io.ReadAll(resp.Body) - if readErr != nil { - // ReadAll failed, fall back to normal error handling without consuming the stream - return s.handleErrorResponse(ctx, resp, c, account) - } - _ = resp.Body.Close() - resp.Body = io.NopCloser(bytes.NewReader(respBody)) - - if s.shouldFailoverOn400(respBody) { - if s.cfg.Gateway.LogUpstreamErrorBody { - log.Printf( - "Account %d: 400 error, attempting failover: %s", - account.ID, - truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), - ) - } else { - log.Printf("Account %d: 400 error, attempting failover", account.ID) - } - s.handleFailoverSideEffects(ctx, resp, account) - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} - } - } return s.handleErrorResponse(ctx, resp, c, account) } @@ -1188,13 +786,6 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) - } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { - // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) - if requestNeedsBetaFeatures(body) { - if beta := defaultApiKeyBetaHeader(body); beta != "" { - req.Header.Set("anthropic-beta", beta) - } - } } return req, nil @@ -1247,83 +838,6 @@ func (s *GatewayService) getBetaHeader(modelID string, clientBetaHeader string) return claude.DefaultBetaHeader } -func requestNeedsBetaFeatures(body []byte) bool { - tools := gjson.GetBytes(body, "tools") - if tools.Exists() && tools.IsArray() && len(tools.Array()) > 0 { - return true - } - if strings.EqualFold(gjson.GetBytes(body, "thinking.type").String(), "enabled") { - return true - } - return false -} - -func defaultApiKeyBetaHeader(body []byte) string { - modelID := gjson.GetBytes(body, "model").String() - if strings.Contains(strings.ToLower(modelID), "haiku") { - return claude.ApiKeyHaikuBetaHeader - } - return claude.ApiKeyBetaHeader -} - -func truncateForLog(b []byte, maxBytes int) string { - if maxBytes <= 0 { - maxBytes = 2048 - } - if len(b) > maxBytes { - b = b[:maxBytes] - } - s := string(b) - // 保持一行,避免污染日志格式 - s = strings.ReplaceAll(s, "\n", "\\n") - s = strings.ReplaceAll(s, "\r", "\\r") - return s -} - -func (s *GatewayService) shouldFailoverOn400(respBody []byte) bool { - // 只对“可能是兼容性差异导致”的 400 允许切换,避免无意义重试。 - // 默认保守:无法识别则不切换。 - msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) - if msg == "" { - return false - } - - // 缺少/错误的 beta header:换账号/链路可能成功(尤其是混合调度时)。 - // 更精确匹配 beta 相关的兼容性问题,避免误触发切换。 - if strings.Contains(msg, "anthropic-beta") || - strings.Contains(msg, "beta feature") || - strings.Contains(msg, "requires beta") { - return true - } - - // thinking/tool streaming 等兼容性约束(常见于中间转换链路) - if strings.Contains(msg, "thinking") || strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") { - return true - } - if strings.Contains(msg, "tool_use") || strings.Contains(msg, "tool_result") || strings.Contains(msg, "tools") { - return true - } - - return false -} - -func extractUpstreamErrorMessage(body []byte) string { - // Claude 风格:{"type":"error","error":{"type":"...","message":"..."}} - if m := gjson.GetBytes(body, "error.message").String(); strings.TrimSpace(m) != "" { - inner := strings.TrimSpace(m) - // 有些上游会把完整 JSON 作为字符串塞进 message - if strings.HasPrefix(inner, "{") { - if innerMsg := gjson.Get(inner, "error.message").String(); strings.TrimSpace(innerMsg) != "" { - return innerMsg - } - } - return m - } - - // 兜底:尝试顶层 message - return gjson.GetBytes(body, "message").String() -} - func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { body, _ := io.ReadAll(resp.Body) @@ -1336,16 +850,6 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res switch resp.StatusCode { case 400: - // 仅记录上游错误摘要(避免输出请求内容);需要时可通过配置打开 - if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { - log.Printf( - "Upstream 400 error (account=%d platform=%s type=%s): %s", - account.ID, - account.Platform, - account.Type, - truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), - ) - } c.Data(http.StatusBadRequest, "application/json", body) return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) case 401: @@ -1825,18 +1329,6 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, // 标记账号状态(429/529等) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) - // 记录上游错误摘要便于排障(不回显请求内容) - if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { - log.Printf( - "count_tokens upstream error %d (account=%d platform=%s type=%s): %s", - resp.StatusCode, - account.ID, - account.Platform, - account.Type, - truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), - ) - } - // 返回简化的错误响应 errMsg := "Upstream request failed" switch resp.StatusCode { @@ -1917,13 +1409,6 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) - } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { - // API-key:与 messages 同步的按需 beta 注入(默认关闭) - if requestNeedsBetaFeatures(body) { - if beta := defaultApiKeyBetaHeader(body); beta != "" { - req.Header.Set("anthropic-beta", beta) - } - } } return req, nil diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index b1877800..a0bf1b6a 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2278,13 +2278,11 @@ func convertClaudeToolsToGeminiTools(tools any) []any { "properties": map[string]any{}, } } - // 清理 JSON Schema - cleanedParams := cleanToolSchema(params) funcDecls = append(funcDecls, map[string]any{ "name": name, "description": desc, - "parameters": cleanedParams, + "parameters": params, }) } @@ -2298,41 +2296,6 @@ func convertClaudeToolsToGeminiTools(tools any) []any { } } -// cleanToolSchema 清理工具的 JSON Schema,移除 Gemini 不支持的字段 -func cleanToolSchema(schema any) any { - if schema == nil { - return nil - } - - switch v := schema.(type) { - case map[string]any: - cleaned := make(map[string]any) - for key, value := range v { - // 跳过不支持的字段 - if key == "$schema" || key == "$id" || key == "$ref" || - key == "additionalProperties" || key == "minLength" || - key == "maxLength" || key == "minItems" || key == "maxItems" { - continue - } - // 递归清理嵌套对象 - cleaned[key] = cleanToolSchema(value) - } - // 规范化 type 字段为大写 - if typeVal, ok := cleaned["type"].(string); ok { - cleaned["type"] = strings.ToUpper(typeVal) - } - return cleaned - case []any: - cleaned := make([]any, len(v)) - for i, item := range v { - cleaned[i] = cleanToolSchema(item) - } - return cleaned - default: - return v - } -} - func convertClaudeGenerationConfig(req map[string]any) map[string]any { out := make(map[string]any) if mt, ok := asInt(req["max_tokens"]); ok && mt > 0 { diff --git a/backend/internal/service/gemini_messages_compat_service_test.go b/backend/internal/service/gemini_messages_compat_service_test.go deleted file mode 100644 index d49f2eb3..00000000 --- a/backend/internal/service/gemini_messages_compat_service_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package service - -import ( - "testing" -) - -// TestConvertClaudeToolsToGeminiTools_CustomType 测试custom类型工具转换 -func TestConvertClaudeToolsToGeminiTools_CustomType(t *testing.T) { - tests := []struct { - name string - tools any - expectedLen int - description string - }{ - { - name: "Standard tools", - tools: []any{ - map[string]any{ - "name": "get_weather", - "description": "Get weather info", - "input_schema": map[string]any{"type": "object"}, - }, - }, - expectedLen: 1, - description: "标准工具格式应该正常转换", - }, - { - name: "Custom type tool (MCP format)", - tools: []any{ - map[string]any{ - "type": "custom", - "name": "mcp_tool", - "custom": map[string]any{ - "description": "MCP tool description", - "input_schema": map[string]any{"type": "object"}, - }, - }, - }, - expectedLen: 1, - description: "Custom类型工具应该从custom字段读取", - }, - { - name: "Mixed standard and custom tools", - tools: []any{ - map[string]any{ - "name": "standard_tool", - "description": "Standard", - "input_schema": map[string]any{"type": "object"}, - }, - map[string]any{ - "type": "custom", - "name": "custom_tool", - "custom": map[string]any{ - "description": "Custom", - "input_schema": map[string]any{"type": "object"}, - }, - }, - }, - expectedLen: 1, - description: "混合工具应该都能正确转换", - }, - { - name: "Custom tool without custom field", - tools: []any{ - map[string]any{ - "type": "custom", - "name": "invalid_custom", - // 缺少 custom 字段 - }, - }, - expectedLen: 0, // 应该被跳过 - description: "缺少custom字段的custom工具应该被跳过", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := convertClaudeToolsToGeminiTools(tt.tools) - - if tt.expectedLen == 0 { - if result != nil { - t.Errorf("%s: expected nil result, got %v", tt.description, result) - } - return - } - - if result == nil { - t.Fatalf("%s: expected non-nil result", tt.description) - } - - if len(result) != 1 { - t.Errorf("%s: expected 1 tool declaration, got %d", tt.description, len(result)) - return - } - - toolDecl, ok := result[0].(map[string]any) - if !ok { - t.Fatalf("%s: result[0] is not map[string]any", tt.description) - } - - funcDecls, ok := toolDecl["functionDeclarations"].([]any) - if !ok { - t.Fatalf("%s: functionDeclarations is not []any", tt.description) - } - - toolsArr, _ := tt.tools.([]any) - expectedFuncCount := 0 - for _, tool := range toolsArr { - toolMap, _ := tool.(map[string]any) - if toolMap["name"] != "" { - // 检查是否为有效的custom工具 - if toolMap["type"] == "custom" { - if toolMap["custom"] != nil { - expectedFuncCount++ - } - } else { - expectedFuncCount++ - } - } - } - - if len(funcDecls) != expectedFuncCount { - t.Errorf("%s: expected %d function declarations, got %d", - tt.description, expectedFuncCount, len(funcDecls)) - } - }) - } -} diff --git a/backend/internal/service/gemini_oauth_service.go b/backend/internal/service/gemini_oauth_service.go index 221bd0f2..e4bda5f8 100644 --- a/backend/internal/service/gemini_oauth_service.go +++ b/backend/internal/service/gemini_oauth_service.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net/http" - "regexp" "strconv" "strings" "time" @@ -164,45 +163,6 @@ type GeminiTokenInfo struct { Scope string `json:"scope,omitempty"` ProjectID string `json:"project_id,omitempty"` OAuthType string `json:"oauth_type,omitempty"` // "code_assist" 或 "ai_studio" - TierID string `json:"tier_id,omitempty"` // Gemini Code Assist tier: LEGACY/PRO/ULTRA -} - -// validateTierID validates tier_id format and length -func validateTierID(tierID string) error { - if tierID == "" { - return nil // Empty is allowed - } - if len(tierID) > 64 { - return fmt.Errorf("tier_id exceeds maximum length of 64 characters") - } - // Allow alphanumeric, underscore, hyphen, and slash (for tier paths) - if !regexp.MustCompile(`^[a-zA-Z0-9_/-]+$`).MatchString(tierID) { - return fmt.Errorf("tier_id contains invalid characters") - } - return nil -} - -// extractTierIDFromAllowedTiers extracts tierID from LoadCodeAssist response -// Prioritizes IsDefault tier, falls back to first non-empty tier -func extractTierIDFromAllowedTiers(allowedTiers []geminicli.AllowedTier) string { - tierID := "LEGACY" - // First pass: look for default tier - for _, tier := range allowedTiers { - if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - // Second pass: if still LEGACY, take first non-empty tier - if tierID == "LEGACY" { - for _, tier := range allowedTiers { - if strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - } - return tierID } func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) { @@ -263,14 +223,13 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 projectID := sessionProjectID - var tierID string // 对于 code_assist 模式,project_id 是必需的 // 对于 ai_studio 模式,project_id 是可选的(不影响使用 AI Studio API) if oauthType == "code_assist" { if projectID == "" { var err error - projectID, tierID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) + projectID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) if err != nil { // 记录警告但不阻断流程,允许后续补充 project_id fmt.Printf("[GeminiOAuth] Warning: Failed to fetch project_id during token exchange: %v\n", err) @@ -289,7 +248,6 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch ExpiresAt: expiresAt, Scope: tokenResp.Scope, ProjectID: projectID, - TierID: tierID, OAuthType: oauthType, }, nil } @@ -399,7 +357,7 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A // For Code Assist, project_id is required. Auto-detect if missing. // For AI Studio OAuth, project_id is optional and should not block refresh. if oauthType == "code_assist" && strings.TrimSpace(tokenInfo.ProjectID) == "" { - projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) + projectID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) if err != nil { return nil, fmt.Errorf("failed to auto-detect project_id: %w", err) } @@ -408,7 +366,6 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A return nil, fmt.Errorf("failed to auto-detect project_id: empty result") } tokenInfo.ProjectID = projectID - tokenInfo.TierID = tierID } return tokenInfo, nil @@ -431,13 +388,6 @@ func (s *GeminiOAuthService) BuildAccountCredentials(tokenInfo *GeminiTokenInfo) if tokenInfo.ProjectID != "" { creds["project_id"] = tokenInfo.ProjectID } - if tokenInfo.TierID != "" { - // Validate tier_id before storing - if err := validateTierID(tokenInfo.TierID); err == nil { - creds["tier_id"] = tokenInfo.TierID - } - // Silently skip invalid tier_id (don't block account creation) - } if tokenInfo.OAuthType != "" { creds["oauth_type"] = tokenInfo.OAuthType } @@ -448,26 +398,34 @@ func (s *GeminiOAuthService) Stop() { s.sessionStore.Stop() } -func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, string, error) { +func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, error) { if s.codeAssist == nil { - return "", "", errors.New("code assist client not configured") + return "", errors.New("code assist client not configured") } loadResp, loadErr := s.codeAssist.LoadCodeAssist(ctx, accessToken, proxyURL, nil) - - // Extract tierID from response (works whether CloudAICompanionProject is set or not) - tierID := "LEGACY" - if loadResp != nil { - tierID = extractTierIDFromAllowedTiers(loadResp.AllowedTiers) - } - - // If LoadCodeAssist returned a project, use it if loadErr == nil && loadResp != nil && strings.TrimSpace(loadResp.CloudAICompanionProject) != "" { - return strings.TrimSpace(loadResp.CloudAICompanionProject), tierID, nil + return strings.TrimSpace(loadResp.CloudAICompanionProject), nil } // Pick tier from allowedTiers; if no default tier is marked, pick the first non-empty tier ID. - // (tierID already extracted above, reuse it) + tierID := "LEGACY" + if loadResp != nil { + for _, tier := range loadResp.AllowedTiers { + if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + if strings.TrimSpace(tierID) == "" || tierID == "LEGACY" { + for _, tier := range loadResp.AllowedTiers { + if strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + } + } req := &geminicli.OnboardUserRequest{ TierID: tierID, @@ -485,39 +443,39 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr // If Code Assist onboarding fails (e.g. INVALID_ARGUMENT), fallback to Cloud Resource Manager projects. fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), tierID, nil + return strings.TrimSpace(fallback), nil } - return "", "", err + return "", err } if resp.Done { if resp.Response != nil && resp.Response.CloudAICompanionProject != nil { switch v := resp.Response.CloudAICompanionProject.(type) { case string: - return strings.TrimSpace(v), tierID, nil + return strings.TrimSpace(v), nil case map[string]any: if id, ok := v["id"].(string); ok { - return strings.TrimSpace(id), tierID, nil + return strings.TrimSpace(id), nil } } } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), tierID, nil + return strings.TrimSpace(fallback), nil } - return "", "", errors.New("onboardUser completed but no project_id returned") + return "", errors.New("onboardUser completed but no project_id returned") } time.Sleep(2 * time.Second) } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), tierID, nil + return strings.TrimSpace(fallback), nil } if loadErr != nil { - return "", "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) + return "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) } - return "", "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) + return "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) } type googleCloudProject struct { diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go index 5f369de5..2195ec55 100644 --- a/backend/internal/service/gemini_token_provider.go +++ b/backend/internal/service/gemini_token_provider.go @@ -112,7 +112,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou } } - detected, tierID, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) + detected, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) if err != nil { log.Printf("[GeminiTokenProvider] Auto-detect project_id failed: %v, fallback to AI Studio API mode", err) return accessToken, nil @@ -123,9 +123,6 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou account.Credentials = make(map[string]any) } account.Credentials["project_id"] = detected - if tierID != "" { - account.Credentials["tier_id"] = tierID - } _ = p.accountRepo.Update(ctx, account) } } diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index f8eb29bd..84e98679 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -13,7 +13,6 @@ import ( "log" "net/http" "regexp" - "sort" "strconv" "strings" "time" @@ -81,7 +80,6 @@ type OpenAIGatewayService struct { userSubRepo UserSubscriptionRepository cache GatewayCache cfg *config.Config - concurrencyService *ConcurrencyService billingService *BillingService rateLimitService *RateLimitService billingCacheService *BillingCacheService @@ -97,7 +95,6 @@ func NewOpenAIGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, - concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, billingCacheService *BillingCacheService, @@ -111,7 +108,6 @@ func NewOpenAIGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, - concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, billingCacheService: billingCacheService, @@ -130,14 +126,6 @@ func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string { return hex.EncodeToString(hash[:]) } -// BindStickySession sets session -> account binding with standard TTL. -func (s *OpenAIGatewayService) BindStickySession(ctx context.Context, sessionHash string, accountID int64) error { - if sessionHash == "" || accountID <= 0 { - return nil - } - return s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, accountID, openaiStickySessionTTL) -} - // SelectAccount selects an OpenAI account with sticky session support func (s *OpenAIGatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) { return s.SelectAccountForModel(ctx, groupID, sessionHash, "") @@ -230,254 +218,6 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C return selected, nil } -// SelectAccountWithLoadAwareness selects an account with load-awareness and wait plan. -func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { - cfg := s.schedulingConfig() - var stickyAccountID int64 - if sessionHash != "" && s.cache != nil { - if accountID, err := s.cache.GetSessionAccountID(ctx, "openai:"+sessionHash); err == nil { - stickyAccountID = accountID - } - } - if s.concurrencyService == nil || !cfg.LoadBatchEnabled { - account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) - if err != nil { - return nil, err - } - result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency) - if err == nil && result.Acquired { - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil { - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil - } - } - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: account.ID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.FallbackWaitTimeout, - MaxWaiting: cfg.FallbackMaxWaiting, - }, - }, nil - } - - accounts, err := s.listSchedulableAccounts(ctx, groupID) - if err != nil { - return nil, err - } - if len(accounts) == 0 { - return nil, errors.New("no available accounts") - } - - isExcluded := func(accountID int64) bool { - if excludedIDs == nil { - return false - } - _, excluded := excludedIDs[accountID] - return excluded - } - - // ============ Layer 1: Sticky session ============ - if sessionHash != "" { - accountID, err := s.cache.GetSessionAccountID(ctx, "openai:"+sessionHash) - if err == nil && accountID > 0 && !isExcluded(accountID) { - account, err := s.accountRepo.GetByID(ctx, accountID) - if err == nil && account.IsSchedulable() && account.IsOpenAI() && - (requestedModel == "" || account.IsModelSupported(requestedModel)) { - result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) - if err == nil && result.Acquired { - _ = s.cache.RefreshSessionTTL(ctx, "openai:"+sessionHash, openaiStickySessionTTL) - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - - waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) - if waitingCount < cfg.StickySessionMaxWaiting { - return &AccountSelectionResult{ - Account: account, - WaitPlan: &AccountWaitPlan{ - AccountID: accountID, - MaxConcurrency: account.Concurrency, - Timeout: cfg.StickySessionWaitTimeout, - MaxWaiting: cfg.StickySessionMaxWaiting, - }, - }, nil - } - } - } - } - - // ============ Layer 2: Load-aware selection ============ - candidates := make([]*Account, 0, len(accounts)) - for i := range accounts { - acc := &accounts[i] - if isExcluded(acc.ID) { - continue - } - if requestedModel != "" && !acc.IsModelSupported(requestedModel) { - continue - } - candidates = append(candidates, acc) - } - - if len(candidates) == 0 { - return nil, errors.New("no available accounts") - } - - accountLoads := make([]AccountWithConcurrency, 0, len(candidates)) - for _, acc := range candidates { - accountLoads = append(accountLoads, AccountWithConcurrency{ - ID: acc.ID, - MaxConcurrency: acc.Concurrency, - }) - } - - loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) - if err != nil { - ordered := append([]*Account(nil), candidates...) - sortAccountsByPriorityAndLastUsed(ordered, false) - for _, acc := range ordered { - result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) - if err == nil && result.Acquired { - if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, acc.ID, openaiStickySessionTTL) - } - return &AccountSelectionResult{ - Account: acc, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - } - } else { - type accountWithLoad struct { - account *Account - loadInfo *AccountLoadInfo - } - var available []accountWithLoad - for _, acc := range candidates { - loadInfo := loadMap[acc.ID] - if loadInfo == nil { - loadInfo = &AccountLoadInfo{AccountID: acc.ID} - } - if loadInfo.LoadRate < 100 { - available = append(available, accountWithLoad{ - account: acc, - loadInfo: loadInfo, - }) - } - } - - if len(available) > 0 { - sort.SliceStable(available, func(i, j int) bool { - a, b := available[i], available[j] - if a.account.Priority != b.account.Priority { - return a.account.Priority < b.account.Priority - } - if a.loadInfo.LoadRate != b.loadInfo.LoadRate { - return a.loadInfo.LoadRate < b.loadInfo.LoadRate - } - switch { - case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil: - return true - case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil: - return false - case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil: - return false - default: - return a.account.LastUsedAt.Before(*b.account.LastUsedAt) - } - }) - - for _, item := range available { - result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) - if err == nil && result.Acquired { - if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, item.account.ID, openaiStickySessionTTL) - } - return &AccountSelectionResult{ - Account: item.account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil - } - } - } - } - - // ============ Layer 3: Fallback wait ============ - sortAccountsByPriorityAndLastUsed(candidates, false) - for _, acc := range candidates { - return &AccountSelectionResult{ - Account: acc, - WaitPlan: &AccountWaitPlan{ - AccountID: acc.ID, - MaxConcurrency: acc.Concurrency, - Timeout: cfg.FallbackWaitTimeout, - MaxWaiting: cfg.FallbackMaxWaiting, - }, - }, nil - } - - return nil, errors.New("no available accounts") -} - -func (s *OpenAIGatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64) ([]Account, error) { - var accounts []Account - var err error - if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) - } else if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformOpenAI) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) - } - if err != nil { - return nil, fmt.Errorf("query accounts failed: %w", err) - } - return accounts, nil -} - -func (s *OpenAIGatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) { - if s.concurrencyService == nil { - return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil - } - return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) -} - -func (s *OpenAIGatewayService) schedulingConfig() config.GatewaySchedulingConfig { - if s.cfg != nil { - return s.cfg.Gateway.Scheduling - } - return config.GatewaySchedulingConfig{ - StickySessionMaxWaiting: 3, - StickySessionWaitTimeout: 45 * time.Second, - FallbackWaitTimeout: 30 * time.Second, - FallbackMaxWaiting: 100, - LoadBatchEnabled: true, - SlotCleanupInterval: 30 * time.Second, - } -} - // GetAccessToken gets the access token for an OpenAI account func (s *OpenAIGatewayService) GetAccessToken(ctx context.Context, account *Account) (string, string, error) { switch account.Type { diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index a202ccf2..81e01d47 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -73,15 +73,6 @@ func ProvideDeferredService(accountRepo AccountRepository, timingWheel *TimingWh return svc } -// ProvideConcurrencyService creates ConcurrencyService and starts slot cleanup worker. -func ProvideConcurrencyService(cache ConcurrencyCache, accountRepo AccountRepository, cfg *config.Config) *ConcurrencyService { - svc := NewConcurrencyService(cache) - if cfg != nil { - svc.StartSlotCleanupWorker(accountRepo, cfg.Gateway.Scheduling.SlotCleanupInterval) - } - return svc -} - // ProviderSet is the Wire provider set for all services var ProviderSet = wire.NewSet( // Core services @@ -116,7 +107,7 @@ var ProviderSet = wire.NewSet( ProvideEmailQueueService, NewTurnstileService, NewSubscriptionService, - ProvideConcurrencyService, + NewConcurrencyService, NewIdentityService, NewCRSSyncService, ProvideUpdateService, diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 5478d151..5bd85d7d 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -122,21 +122,6 @@ pricing: # Hash check interval in minutes hash_check_interval_minutes: 10 -# ============================================================================= -# Gateway (Optional) -# ============================================================================= -gateway: - # Wait time (in seconds) for upstream response headers (streaming body not affected) - response_header_timeout: 300 - # Log upstream error response body summary (safe/truncated; does not log request content) - log_upstream_error_body: false - # Max bytes to log from upstream error body - log_upstream_error_body_max_bytes: 2048 - # Auto inject anthropic-beta for API-key accounts when needed (default off) - inject_beta_for_apikey: false - # Allow failover on selected 400 errors (default off) - failover_on_400: false - # ============================================================================= # Gemini OAuth (Required for Gemini accounts) # ============================================================================= diff --git a/deploy/flow.md b/deploy/flow.md deleted file mode 100644 index 0904c72f..00000000 --- a/deploy/flow.md +++ /dev/null @@ -1,222 +0,0 @@ -```mermaid -flowchart TD - %% Master dispatch - A[HTTP Request] --> B{Route} - B -->|v1 messages| GA0 - B -->|openai v1 responses| OA0 - B -->|v1beta models model action| GM0 - B -->|v1 messages count tokens| GT0 - B -->|v1beta models list or get| GL0 - - %% ========================= - %% FLOW A: Claude Gateway - %% ========================= - subgraph FLOW_A["v1 messages Claude Gateway"] - GA0[Auth middleware] --> GA1[Read body] - GA1 -->|empty| GA1E[400 invalid_request_error] - GA1 --> GA2[ParseGatewayRequest] - GA2 -->|parse error| GA2E[400 invalid_request_error] - GA2 --> GA3{model present} - GA3 -->|no| GA3E[400 invalid_request_error] - GA3 --> GA4[streamStarted false] - GA4 --> GA5[IncrementWaitCount user] - GA5 -->|queue full| GA5E[429 rate_limit_error] - GA5 --> GA6[AcquireUserSlotWithWait] - GA6 -->|timeout or fail| GA6E[429 rate_limit_error] - GA6 --> GA7[BillingEligibility check post wait] - GA7 -->|fail| GA7E[403 billing_error] - GA7 --> GA8[Generate sessionHash] - GA8 --> GA9[Resolve platform] - GA9 --> GA10{platform gemini} - GA10 -->|yes| GA10Y[sessionKey gemini hash] - GA10 -->|no| GA10N[sessionKey hash] - GA10Y --> GA11 - GA10N --> GA11 - - GA11[SelectAccountWithLoadAwareness] -->|err and no failed| GA11E1[503 no available accounts] - GA11 -->|err and failed| GA11E2[map failover error] - GA11 --> GA12[Warmup intercept] - GA12 -->|yes| GA12Y[return mock and release if held] - GA12 -->|no| GA13[Acquire account slot or wait] - GA13 -->|wait queue full| GA13E1[429 rate_limit_error] - GA13 -->|wait timeout| GA13E2[429 concurrency limit] - GA13 --> GA14[BindStickySession if waited] - GA14 --> GA15{account platform antigravity} - GA15 -->|yes| GA15Y[ForwardGemini antigravity] - GA15 -->|no| GA15N[Forward Claude] - GA15Y --> GA16[Release account slot and dec account wait] - GA15N --> GA16 - GA16 --> GA17{UpstreamFailoverError} - GA17 -->|yes| GA18[mark failedAccountIDs and map error if exceed] - GA18 -->|loop| GA11 - GA17 -->|no| GA19[success async RecordUsage and return] - GA19 --> GA20[defer release user slot and dec wait count] - end - - %% ========================= - %% FLOW B: OpenAI - %% ========================= - subgraph FLOW_B["openai v1 responses"] - OA0[Auth middleware] --> OA1[Read body] - OA1 -->|empty| OA1E[400 invalid_request_error] - OA1 --> OA2[json Unmarshal body] - OA2 -->|parse error| OA2E[400 invalid_request_error] - OA2 --> OA3{model present} - OA3 -->|no| OA3E[400 invalid_request_error] - OA3 --> OA4{User Agent Codex CLI} - OA4 -->|no| OA4N[set default instructions] - OA4 -->|yes| OA4Y[no change] - OA4N --> OA5 - OA4Y --> OA5 - OA5[streamStarted false] --> OA6[IncrementWaitCount user] - OA6 -->|queue full| OA6E[429 rate_limit_error] - OA6 --> OA7[AcquireUserSlotWithWait] - OA7 -->|timeout or fail| OA7E[429 rate_limit_error] - OA7 --> OA8[BillingEligibility check post wait] - OA8 -->|fail| OA8E[403 billing_error] - OA8 --> OA9[sessionHash sha256 session_id] - OA9 --> OA10[SelectAccountWithLoadAwareness] - OA10 -->|err and no failed| OA10E1[503 no available accounts] - OA10 -->|err and failed| OA10E2[map failover error] - OA10 --> OA11[Acquire account slot or wait] - OA11 -->|wait queue full| OA11E1[429 rate_limit_error] - OA11 -->|wait timeout| OA11E2[429 concurrency limit] - OA11 --> OA12[BindStickySession openai hash if waited] - OA12 --> OA13[Forward OpenAI upstream] - OA13 --> OA14[Release account slot and dec account wait] - OA14 --> OA15{UpstreamFailoverError} - OA15 -->|yes| OA16[mark failedAccountIDs and map error if exceed] - OA16 -->|loop| OA10 - OA15 -->|no| OA17[success async RecordUsage and return] - OA17 --> OA18[defer release user slot and dec wait count] - end - - %% ========================= - %% FLOW C: Gemini Native - %% ========================= - subgraph FLOW_C["v1beta models model action Gemini Native"] - GM0[Auth middleware] --> GM1[Validate platform] - GM1 -->|invalid| GM1E[400 googleError] - GM1 --> GM2[Parse path modelName action] - GM2 -->|invalid| GM2E[400 googleError] - GM2 --> GM3{action supported} - GM3 -->|no| GM3E[404 googleError] - GM3 --> GM4[Read body] - GM4 -->|empty| GM4E[400 googleError] - GM4 --> GM5[streamStarted false] - GM5 --> GM6[IncrementWaitCount user] - GM6 -->|queue full| GM6E[429 googleError] - GM6 --> GM7[AcquireUserSlotWithWait] - GM7 -->|timeout or fail| GM7E[429 googleError] - GM7 --> GM8[BillingEligibility check post wait] - GM8 -->|fail| GM8E[403 googleError] - GM8 --> GM9[Generate sessionHash] - GM9 --> GM10[sessionKey gemini hash] - GM10 --> GM11[SelectAccountWithLoadAwareness] - GM11 -->|err and no failed| GM11E1[503 googleError] - GM11 -->|err and failed| GM11E2[mapGeminiUpstreamError] - GM11 --> GM12[Acquire account slot or wait] - GM12 -->|wait queue full| GM12E1[429 googleError] - GM12 -->|wait timeout| GM12E2[429 googleError] - GM12 --> GM13[BindStickySession if waited] - GM13 --> GM14{account platform antigravity} - GM14 -->|yes| GM14Y[ForwardGemini antigravity] - GM14 -->|no| GM14N[ForwardNative] - GM14Y --> GM15[Release account slot and dec account wait] - GM14N --> GM15 - GM15 --> GM16{UpstreamFailoverError} - GM16 -->|yes| GM17[mark failedAccountIDs and map error if exceed] - GM17 -->|loop| GM11 - GM16 -->|no| GM18[success async RecordUsage and return] - GM18 --> GM19[defer release user slot and dec wait count] - end - - %% ========================= - %% FLOW D: CountTokens - %% ========================= - subgraph FLOW_D["v1 messages count tokens"] - GT0[Auth middleware] --> GT1[Read body] - GT1 -->|empty| GT1E[400 invalid_request_error] - GT1 --> GT2[ParseGatewayRequest] - GT2 -->|parse error| GT2E[400 invalid_request_error] - GT2 --> GT3{model present} - GT3 -->|no| GT3E[400 invalid_request_error] - GT3 --> GT4[BillingEligibility check] - GT4 -->|fail| GT4E[403 billing_error] - GT4 --> GT5[ForwardCountTokens] - end - - %% ========================= - %% FLOW E: Gemini Models List Get - %% ========================= - subgraph FLOW_E["v1beta models list or get"] - GL0[Auth middleware] --> GL1[Validate platform] - GL1 -->|invalid| GL1E[400 googleError] - GL1 --> GL2{force platform antigravity} - GL2 -->|yes| GL2Y[return static fallback models] - GL2 -->|no| GL3[SelectAccountForAIStudioEndpoints] - GL3 -->|no gemini and has antigravity| GL3Y[return fallback models] - GL3 -->|no accounts| GL3E[503 googleError] - GL3 --> GL4[ForwardAIStudioGET] - GL4 -->|error| GL4E[502 googleError] - GL4 --> GL5[Passthrough response or fallback] - end - - %% ========================= - %% SHARED: Account Selection - %% ========================= - subgraph SELECT["SelectAccountWithLoadAwareness detail"] - S0[Start] --> S1{concurrencyService nil OR load batch disabled} - S1 -->|yes| S2[SelectAccountForModelWithExclusions legacy] - S2 --> S3[tryAcquireAccountSlot] - S3 -->|acquired| S3Y[SelectionResult Acquired true ReleaseFunc] - S3 -->|not acquired| S3N[WaitPlan FallbackTimeout MaxWaiting] - S1 -->|no| S4[Resolve platform] - S4 --> S5[List schedulable accounts] - S5 --> S6[Layer1 Sticky session] - S6 -->|hit and valid| S6A[tryAcquireAccountSlot] - S6A -->|acquired| S6AY[SelectionResult Acquired true] - S6A -->|not acquired and waitingCount < StickyMax| S6AN[WaitPlan StickyTimeout Max] - S6 --> S7[Layer2 Load aware] - S7 --> S7A[Load batch concurrency plus wait to loadRate] - S7A --> S7B[Sort priority load LRU OAuth prefer for Gemini] - S7B --> S7C[tryAcquireAccountSlot in order] - S7C -->|first success| S7CY[SelectionResult Acquired true] - S7C -->|none| S8[Layer3 Fallback wait] - S8 --> S8A[Sort priority LRU] - S8A --> S8B[WaitPlan FallbackTimeout Max] - end - - %% ========================= - %% SHARED: Wait Acquire - %% ========================= - subgraph WAIT["AcquireXSlotWithWait detail"] - W0[Try AcquireXSlot immediately] -->|acquired| W1[return ReleaseFunc] - W0 -->|not acquired| W2[Wait loop with timeout] - W2 --> W3[Backoff 100ms x1.5 jitter max2s] - W2 --> W4[If streaming and ping format send SSE ping] - W2 --> W5[Retry AcquireXSlot on timer] - W5 -->|acquired| W1 - W2 -->|timeout| W6[ConcurrencyError IsTimeout true] - end - - %% ========================= - %% SHARED: Account Wait Queue - %% ========================= - subgraph AQ["Account Wait Queue Redis Lua"] - Q1[IncrementAccountWaitCount] --> Q2{current >= max} - Q2 -->|yes| Q2Y[return false] - Q2 -->|no| Q3[INCR and if first set TTL] - Q3 --> Q4[return true] - Q5[DecrementAccountWaitCount] --> Q6[if current > 0 then DECR] - end - - %% ========================= - %% SHARED: Background cleanup - %% ========================= - subgraph CLEANUP["Slot Cleanup Worker"] - C0[StartSlotCleanupWorker interval] --> C1[List schedulable accounts] - C1 --> C2[CleanupExpiredAccountSlots per account] - C2 --> C3[Repeat every interval] - end -``` diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 1770a985..6563ee0c 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -952,7 +952,6 @@ "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "undici-types": "~6.21.0" } @@ -1368,7 +1367,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1445,7 +1443,6 @@ "resolved": "https://registry.npmmirror.com/chart.js/-/chart.js-4.5.1.tgz", "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", "license": "MIT", - "peer": true, "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -2043,7 +2040,6 @@ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "dev": true, "license": "MIT", - "peer": true, "bin": { "jiti": "bin/jiti.js" } @@ -2352,7 +2348,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -2826,7 +2821,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -2860,7 +2854,6 @@ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -2933,7 +2926,6 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -3105,7 +3097,6 @@ "resolved": "https://registry.npmmirror.com/vue/-/vue-3.5.25.tgz", "integrity": "sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==", "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/compiler-sfc": "3.5.25", @@ -3199,7 +3190,6 @@ "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@volar/typescript": "2.4.15", "@vue/language-core": "2.2.12" diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index 914678a5..c1ca08fa 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -83,14 +83,6 @@ > - - - - {{ tierDisplay }} - @@ -148,23 +140,4 @@ const statusText = computed(() => { return props.account.status }) -// Computed: tier display -const tierDisplay = computed(() => { - const credentials = props.account.credentials as Record | undefined - const tierId = credentials?.tier_id - if (!tierId || tierId === 'unknown') return null - - const tierMap: Record = { - 'free': 'Free', - 'payg': 'Pay-as-you-go', - 'pay-as-you-go': 'Pay-as-you-go', - 'enterprise': 'Enterprise', - 'LEGACY': 'Legacy', - 'PRO': 'Pro', - 'ULTRA': 'Ultra' - } - - return tierMap[tierId] || tierId -}) - From b6d1e7a0846d8946ce3a9dbd3d3606db2e410d55 Mon Sep 17 00:00:00 2001 From: IanShaw <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 10:45:57 +0800 Subject: [PATCH 33/51] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20/v1/messages?= =?UTF-8?q?=20=E9=97=B4=E6=AD=87=E6=80=A7=20400=20=E9=94=99=E8=AF=AF=20(#1?= =?UTF-8?q?12)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(upstream): 修复上游格式兼容性问题 - 跳过Claude模型无signature的thinking block - 支持custom类型工具(MCP)格式转换 - 添加ClaudeCustomToolSpec结构体支持MCP工具 - 添加Custom字段验证,跳过无效custom工具 - 在convertClaudeToolsToGeminiTools中添加schema清理 - 完整的单元测试覆盖,包含边界情况 修复: Issue 0.1 signature缺失, Issue 0.2 custom工具格式 改进: Codex审查发现的2个重要问题 测试: - TestBuildParts_ThinkingBlockWithoutSignature: 验证thinking block处理 - TestBuildTools_CustomTypeTools: 验证custom工具转换和边界情况 - TestConvertClaudeToolsToGeminiTools_CustomType: 验证service层转换 * feat(gemini): 添加Gemini限额与TierID支持 实现PR1:Gemini限额与TierID功能 后端修改: - GeminiTokenInfo结构体添加TierID字段 - fetchProjectID函数返回(projectID, tierID, error) - 从LoadCodeAssist响应中提取tierID(优先IsDefault,回退到第一个非空tier) - ExchangeCode、RefreshAccountToken、GetAccessToken函数更新以处理tierID - BuildAccountCredentials函数保存tier_id到credentials 前端修改: - AccountStatusIndicator组件添加tier显示 - 支持LEGACY/PRO/ULTRA等tier类型的友好显示 - 使用蓝色badge展示tier信息 技术细节: - tierID提取逻辑:优先选择IsDefault的tier,否则选择第一个非空tier - 所有fetchProjectID调用点已更新以处理新的返回签名 - 前端gracefully处理missing/unknown tier_id * refactor(gemini): 优化TierID实现并添加安全验证 根据并发代码审查(code-reviewer, security-auditor, gemini, codex)的反馈进行改进: 安全改进: - 添加validateTierID函数验证tier_id格式和长度(最大64字符) - 限制tier_id字符集为字母数字、下划线、连字符和斜杠 - 在BuildAccountCredentials中验证tier_id后再存储 - 静默跳过无效tier_id,不阻塞账户创建 代码质量改进: - 提取extractTierIDFromAllowedTiers辅助函数消除重复代码 - 重构fetchProjectID函数,tierID提取逻辑只执行一次 - 改进代码可读性和可维护性 审查工具: - code-reviewer agent (a09848e) - security-auditor agent (a9a149c) - gemini CLI (bcc7c81) - codex (b5d8919) 修复问题: - HIGH: 未验证的tier_id输入 - MEDIUM: 代码重复(tierID提取逻辑重复2次) * fix(format): 修复 gofmt 格式问题 - 修复 claude_types.go 中的字段对齐问题 - 修复 gemini_messages_compat_service.go 中的缩进问题 * fix(upstream): 修复上游格式兼容性问题 (#14) * fix(upstream): 修复上游格式兼容性问题 - 跳过Claude模型无signature的thinking block - 支持custom类型工具(MCP)格式转换 - 添加ClaudeCustomToolSpec结构体支持MCP工具 - 添加Custom字段验证,跳过无效custom工具 - 在convertClaudeToolsToGeminiTools中添加schema清理 - 完整的单元测试覆盖,包含边界情况 修复: Issue 0.1 signature缺失, Issue 0.2 custom工具格式 改进: Codex审查发现的2个重要问题 测试: - TestBuildParts_ThinkingBlockWithoutSignature: 验证thinking block处理 - TestBuildTools_CustomTypeTools: 验证custom工具转换和边界情况 - TestConvertClaudeToolsToGeminiTools_CustomType: 验证service层转换 * fix(format): 修复 gofmt 格式问题 - 修复 claude_types.go 中的字段对齐问题 - 修复 gemini_messages_compat_service.go 中的缩进问题 * fix(format): 修复 claude_types.go 的 gofmt 格式问题 * feat(antigravity): 优化 thinking block 和 schema 处理 - 为 dummy thinking block 添加 ThoughtSignature - 重构 thinking block 处理逻辑,在每个条件分支内创建 part - 优化 excludedSchemaKeys,移除 Gemini 实际支持的字段 (minItems, maxItems, minimum, maximum, additionalProperties, format) - 添加详细注释说明 Gemini API 支持的 schema 字段 * fix(antigravity): 增强 schema 清理的安全性 基于 Codex review 建议: - 添加 format 字段白名单过滤,只保留 Gemini 支持的 date-time/date/time - 补充更多不支持的 schema 关键字到黑名单: * 组合 schema: oneOf, anyOf, allOf, not, if/then/else * 对象验证: minProperties, maxProperties, patternProperties 等 * 定义引用: $defs, definitions - 避免不支持的 schema 字段导致 Gemini API 校验失败 * fix(lint): 修复 gemini_messages_compat_service 空分支警告 - 在 cleanToolSchema 的 if 语句中添加 continue - 移除重复的注释 * fix(antigravity): 移除 minItems/maxItems 以兼容 Claude API - 将 minItems 和 maxItems 添加到 schema 黑名单 - Claude API (Vertex AI) 不支持这些数组验证字段 - 添加调试日志记录工具 schema 转换过程 - 修复 tools.14.custom.input_schema 验证错误 * fix(antigravity): 修复 additionalProperties schema 对象问题 - 将 additionalProperties 的 schema 对象转换为布尔值 true - Claude API 只支持 additionalProperties: false,不支持 schema 对象 - 修复 tools.14.custom.input_schema 验证错误 - 参考 Claude 官方文档的 JSON Schema 限制 * fix(antigravity): 修复 Claude 模型 thinking 块兼容性问题 - 完全跳过 Claude 模型的 thinking 块以避免 signature 验证失败 - 只在 Gemini 模型中使用 dummy thought signature - 修改 additionalProperties 默认值为 false(更安全) - 添加调试日志以便排查问题 * fix(upstream): 修复跨模型切换时的 dummy signature 问题 基于 Codex review 和用户场景分析的修复: 1. 问题场景 - Gemini (thinking) → Claude (thinking) 切换时 - Gemini 返回的 thinking 块使用 dummy signature - Claude API 会拒绝 dummy signature,导致 400 错误 2. 修复内容 - request_transformer.go:262: 跳过 dummy signature - 只保留真实的 Claude signature - 支持频繁的跨模型切换 3. 其他修复(基于 Codex review) - gateway_service.go:691: 修复 io.ReadAll 错误处理 - gateway_service.go:687: 条件日志(尊重 LogUpstreamErrorBody 配置) - gateway_service.go:915: 收紧 400 failover 启发式 - request_transformer.go:188: 移除签名成功日志 4. 新增功能(默认关闭) - 阶段 1: 上游错误日志(GATEWAY_LOG_UPSTREAM_ERROR_BODY) - 阶段 2: Antigravity thinking 修复 - 阶段 3: API-key beta 注入(GATEWAY_INJECT_BETA_FOR_APIKEY) - 阶段 3: 智能 400 failover(GATEWAY_FAILOVER_ON_400) 测试:所有测试通过 * fix(lint): 修复 golangci-lint 问题 - 应用 De Morgan 定律简化条件判断 - 修复 gofmt 格式问题 - 移除未使用的 min 函数 --- backend/internal/config/config.go | 15 ++ .../internal/pkg/antigravity/claude_types.go | 3 + .../pkg/antigravity/request_transformer.go | 223 +++++++++++++----- .../antigravity/request_transformer_test.go | 179 ++++++++++++++ backend/internal/pkg/claude/constants.go | 6 + .../service/antigravity_gateway_service.go | 9 + backend/internal/service/gateway_service.go | 138 +++++++++++ .../service/gemini_messages_compat_service.go | 39 ++- .../gemini_messages_compat_service_test.go | 128 ++++++++++ .../internal/service/gemini_oauth_service.go | 104 +++++--- .../internal/service/gemini_token_provider.go | 5 +- deploy/config.example.yaml | 15 ++ frontend/package-lock.json | 10 + .../account/AccountStatusIndicator.vue | 27 +++ 14 files changed, 815 insertions(+), 86 deletions(-) create mode 100644 backend/internal/pkg/antigravity/request_transformer_test.go create mode 100644 backend/internal/service/gemini_messages_compat_service_test.go diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index aeeddcb4..d3674932 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -119,6 +119,17 @@ type GatewayConfig struct { // ConcurrencySlotTTLMinutes: 并发槽位过期时间(分钟) // 应大于最长 LLM 请求时间,防止请求完成前槽位过期 ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"` + + // 是否记录上游错误响应体摘要(避免输出请求内容) + LogUpstreamErrorBody bool `mapstructure:"log_upstream_error_body"` + // 上游错误响应体记录最大字节数(超过会截断) + LogUpstreamErrorBodyMaxBytes int `mapstructure:"log_upstream_error_body_max_bytes"` + + // API-key 账号在客户端未提供 anthropic-beta 时,是否按需自动补齐(默认关闭以保持兼容) + InjectBetaForApiKey bool `mapstructure:"inject_beta_for_apikey"` + + // 是否允许对部分 400 错误触发 failover(默认关闭以避免改变语义) + FailoverOn400 bool `mapstructure:"failover_on_400"` } func (s *ServerConfig) Address() string { @@ -313,6 +324,10 @@ func setDefaults() { // Gateway viper.SetDefault("gateway.response_header_timeout", 300) // 300秒(5分钟)等待上游响应头,LLM高负载时可能排队较久 + viper.SetDefault("gateway.log_upstream_error_body", false) + viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048) + viper.SetDefault("gateway.inject_beta_for_apikey", false) + viper.SetDefault("gateway.failover_on_400", false) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy) // HTTP 上游连接池配置(针对 5000+ 并发用户优化) diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go index 01b805cd..34e6b1f4 100644 --- a/backend/internal/pkg/antigravity/claude_types.go +++ b/backend/internal/pkg/antigravity/claude_types.go @@ -54,6 +54,9 @@ type CustomToolSpec struct { InputSchema map[string]any `json:"input_schema"` } +// ClaudeCustomToolSpec 兼容旧命名(MCP custom 工具规格) +type ClaudeCustomToolSpec = CustomToolSpec + // SystemBlock system prompt 数组形式的元素 type SystemBlock struct { Type string `json:"type"` diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go index e0b5b886..83b87a32 100644 --- a/backend/internal/pkg/antigravity/request_transformer.go +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -14,13 +14,16 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st // 用于存储 tool_use id -> name 映射 toolIDToName := make(map[string]string) - // 检测是否启用 thinking - isThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" - // 只有 Gemini 模型支持 dummy thought workaround // Claude 模型通过 Vertex/Google API 需要有效的 thought signatures allowDummyThought := strings.HasPrefix(mappedModel, "gemini-") + // 检测是否启用 thinking + requestedThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" + // 为避免 Claude 模型的 thought signature/消息块约束导致 400(上游要求 thinking 块开头等), + // 非 Gemini 模型默认不启用 thinking(除非未来支持完整签名链路)。 + isThinkingEnabled := requestedThinkingEnabled && allowDummyThought + // 1. 构建 contents contents, err := buildContents(claudeReq.Messages, toolIDToName, isThinkingEnabled, allowDummyThought) if err != nil { @@ -31,7 +34,15 @@ func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel st systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model) // 3. 构建 generationConfig - generationConfig := buildGenerationConfig(claudeReq) + reqForGen := claudeReq + if requestedThinkingEnabled && !allowDummyThought { + log.Printf("[Warning] Disabling thinking for non-Gemini model in antigravity transform: model=%s", mappedModel) + // shallow copy to avoid mutating caller's request + clone := *claudeReq + clone.Thinking = nil + reqForGen = &clone + } + generationConfig := buildGenerationConfig(reqForGen) // 4. 构建 tools tools := buildTools(claudeReq.Tools) @@ -148,8 +159,9 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT if !hasThoughtPart && len(parts) > 0 { // 在开头添加 dummy thinking block parts = append([]GeminiPart{{ - Text: "Thinking...", - Thought: true, + Text: "Thinking...", + Thought: true, + ThoughtSignature: dummyThoughtSignature, }}, parts...) } } @@ -171,6 +183,34 @@ func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isT // 参考: https://ai.google.dev/gemini-api/docs/thought-signatures const dummyThoughtSignature = "skip_thought_signature_validator" +// isValidThoughtSignature 验证 thought signature 是否有效 +// Claude API 要求 signature 必须是 base64 编码的字符串,长度至少 32 字节 +func isValidThoughtSignature(signature string) bool { + // 空字符串无效 + if signature == "" { + return false + } + + // signature 应该是 base64 编码,长度至少 40 个字符(约 30 字节) + // 参考 Claude API 文档和实际观察到的有效 signature + if len(signature) < 40 { + log.Printf("[Debug] Signature too short: len=%d", len(signature)) + return false + } + + // 检查是否是有效的 base64 字符 + // base64 字符集: A-Z, a-z, 0-9, +, /, = + for i, c := range signature { + if (c < 'A' || c > 'Z') && (c < 'a' || c > 'z') && + (c < '0' || c > '9') && c != '+' && c != '/' && c != '=' { + log.Printf("[Debug] Invalid base64 character at position %d: %c (code=%d)", i, c, c) + return false + } + } + + return true +} + // buildParts 构建消息的 parts // allowDummyThought: 只有 Gemini 模型支持 dummy thought signature func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDummyThought bool) ([]GeminiPart, error) { @@ -199,22 +239,30 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu } case "thinking": - part := GeminiPart{ - Text: block.Thinking, - Thought: true, - } - // 保留原有 signature(Claude 模型需要有效的 signature) - if block.Signature != "" { - part.ThoughtSignature = block.Signature - } else if !allowDummyThought { - // Claude 模型需要有效 signature,跳过无 signature 的 thinking block - log.Printf("Warning: skipping thinking block without signature for Claude model") + if allowDummyThought { + // Gemini 模型可以使用 dummy signature + parts = append(parts, GeminiPart{ + Text: block.Thinking, + Thought: true, + ThoughtSignature: dummyThoughtSignature, + }) continue - } else { - // Gemini 模型使用 dummy signature - part.ThoughtSignature = dummyThoughtSignature } - parts = append(parts, part) + + // Claude 模型:仅在提供有效 signature 时保留 thinking block;否则跳过以避免上游校验失败。 + signature := strings.TrimSpace(block.Signature) + if signature == "" || signature == dummyThoughtSignature { + log.Printf("[Warning] Skipping thinking block for Claude model (missing or dummy signature)") + continue + } + if !isValidThoughtSignature(signature) { + log.Printf("[Debug] Thinking signature may be invalid (passing through anyway): len=%d", len(signature)) + } + parts = append(parts, GeminiPart{ + Text: block.Thinking, + Thought: true, + ThoughtSignature: signature, + }) case "image": if block.Source != nil && block.Source.Type == "base64" { @@ -239,10 +287,9 @@ func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDu ID: block.ID, }, } - // 保留原有 signature,或对 Gemini 模型使用 dummy signature - if block.Signature != "" { - part.ThoughtSignature = block.Signature - } else if allowDummyThought { + // 只有 Gemini 模型使用 dummy signature + // Claude 模型不设置 signature(避免验证问题) + if allowDummyThought { part.ThoughtSignature = dummyThoughtSignature } parts = append(parts, part) @@ -386,9 +433,9 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 普通工具 var funcDecls []GeminiFunctionDecl - for _, tool := range tools { + for i, tool := range tools { // 跳过无效工具名称 - if tool.Name == "" { + if strings.TrimSpace(tool.Name) == "" { log.Printf("Warning: skipping tool with empty name") continue } @@ -397,10 +444,18 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { var inputSchema map[string]any // 检查是否为 custom 类型工具 (MCP) - if tool.Type == "custom" && tool.Custom != nil { - // Custom 格式: 从 custom 字段获取 description 和 input_schema + if tool.Type == "custom" { + if tool.Custom == nil || tool.Custom.InputSchema == nil { + log.Printf("[Warning] Skipping invalid custom tool '%s': missing custom spec or input_schema", tool.Name) + continue + } description = tool.Custom.Description inputSchema = tool.Custom.InputSchema + + // 调试日志:记录 custom 工具的 schema + if schemaJSON, err := json.Marshal(inputSchema); err == nil { + log.Printf("[Debug] Tool[%d] '%s' (custom) original schema: %s", i, tool.Name, string(schemaJSON)) + } } else { // 标准格式: 从顶层字段获取 description = tool.Description @@ -409,7 +464,6 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { // 清理 JSON Schema params := cleanJSONSchema(inputSchema) - // 为 nil schema 提供默认值 if params == nil { params = map[string]any{ @@ -418,6 +472,11 @@ func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { } } + // 调试日志:记录清理后的 schema + if paramsJSON, err := json.Marshal(params); err == nil { + log.Printf("[Debug] Tool[%d] '%s' cleaned schema: %s", i, tool.Name, string(paramsJSON)) + } + funcDecls = append(funcDecls, GeminiFunctionDecl{ Name: tool.Name, Description: description, @@ -479,31 +538,64 @@ func cleanJSONSchema(schema map[string]any) map[string]any { } // excludedSchemaKeys 不支持的 schema 字段 +// 基于 Claude API (Vertex AI) 的实际支持情况 +// 支持: type, description, enum, properties, required, additionalProperties, items +// 不支持: minItems, maxItems, minLength, maxLength, pattern, minimum, maximum 等验证字段 var excludedSchemaKeys = map[string]bool{ - "$schema": true, - "$id": true, - "$ref": true, - "additionalProperties": true, - "minLength": true, - "maxLength": true, - "minItems": true, - "maxItems": true, - "uniqueItems": true, - "minimum": true, - "maximum": true, - "exclusiveMinimum": true, - "exclusiveMaximum": true, - "pattern": true, - "format": true, - "default": true, - "strict": true, - "const": true, - "examples": true, - "deprecated": true, - "readOnly": true, - "writeOnly": true, - "contentMediaType": true, - "contentEncoding": true, + // 元 schema 字段 + "$schema": true, + "$id": true, + "$ref": true, + + // 字符串验证(Gemini 不支持) + "minLength": true, + "maxLength": true, + "pattern": true, + + // 数字验证(Claude API 通过 Vertex AI 不支持这些字段) + "minimum": true, + "maximum": true, + "exclusiveMinimum": true, + "exclusiveMaximum": true, + "multipleOf": true, + + // 数组验证(Claude API 通过 Vertex AI 不支持这些字段) + "uniqueItems": true, + "minItems": true, + "maxItems": true, + + // 组合 schema(Gemini 不支持) + "oneOf": true, + "anyOf": true, + "allOf": true, + "not": true, + "if": true, + "then": true, + "else": true, + "$defs": true, + "definitions": true, + + // 对象验证(仅保留 properties/required/additionalProperties) + "minProperties": true, + "maxProperties": true, + "patternProperties": true, + "propertyNames": true, + "dependencies": true, + "dependentSchemas": true, + "dependentRequired": true, + + // 其他不支持的字段 + "default": true, + "const": true, + "examples": true, + "deprecated": true, + "readOnly": true, + "writeOnly": true, + "contentMediaType": true, + "contentEncoding": true, + + // Claude 特有字段 + "strict": true, } // cleanSchemaValue 递归清理 schema 值 @@ -523,6 +615,31 @@ func cleanSchemaValue(value any) any { continue } + // 特殊处理 format 字段:只保留 Gemini 支持的 format 值 + if k == "format" { + if formatStr, ok := val.(string); ok { + // Gemini 只支持 date-time, date, time + if formatStr == "date-time" || formatStr == "date" || formatStr == "time" { + result[k] = val + } + // 其他 format 值直接跳过 + } + continue + } + + // 特殊处理 additionalProperties:Claude API 只支持布尔值,不支持 schema 对象 + if k == "additionalProperties" { + if boolVal, ok := val.(bool); ok { + result[k] = boolVal + log.Printf("[Debug] additionalProperties is bool: %v", boolVal) + } else { + // 如果是 schema 对象,转换为 false(更安全的默认值) + result[k] = false + log.Printf("[Debug] additionalProperties is not bool (type: %T), converting to false", val) + } + continue + } + // 递归清理所有值 result[k] = cleanSchemaValue(val) } diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go new file mode 100644 index 00000000..56eebad0 --- /dev/null +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -0,0 +1,179 @@ +package antigravity + +import ( + "encoding/json" + "testing" +) + +// TestBuildParts_ThinkingBlockWithoutSignature 测试thinking block无signature时的处理 +func TestBuildParts_ThinkingBlockWithoutSignature(t *testing.T) { + tests := []struct { + name string + content string + allowDummyThought bool + expectedParts int + description string + }{ + { + name: "Claude model - skip thinking block without signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 2, // 只有两个text block + description: "Claude模型应该跳过无signature的thinking block", + }, + { + name: "Claude model - keep thinking block with signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": "valid_sig"}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 3, // 三个block都保留 + description: "Claude模型应该保留有signature的thinking block", + }, + { + name: "Gemini model - use dummy signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: true, + expectedParts: 3, // 三个block都保留,thinking使用dummy signature + description: "Gemini模型应该为无signature的thinking block使用dummy signature", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + toolIDToName := make(map[string]string) + parts, err := buildParts(json.RawMessage(tt.content), toolIDToName, tt.allowDummyThought) + + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + + if len(parts) != tt.expectedParts { + t.Errorf("%s: got %d parts, want %d parts", tt.description, len(parts), tt.expectedParts) + } + }) + } +} + +// TestBuildTools_CustomTypeTools 测试custom类型工具转换 +func TestBuildTools_CustomTypeTools(t *testing.T) { + tests := []struct { + name string + tools []ClaudeTool + expectedLen int + description string + }{ + { + name: "Standard tool format", + tools: []ClaudeTool{ + { + Name: "get_weather", + Description: "Get weather information", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, + }, + }, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "mcp_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "MCP tool description", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "param": map[string]any{"type": "string"}, + }, + }, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从Custom字段读取description和input_schema", + }, + { + name: "Mixed standard and custom tools", + tools: []ClaudeTool{ + { + Name: "standard_tool", + Description: "Standard tool", + InputSchema: map[string]any{"type": "object"}, + }, + { + Type: "custom", + Name: "custom_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "Custom tool", + InputSchema: map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, // 返回一个GeminiToolDeclaration,包含2个function declarations + description: "混合标准和custom工具应该都能正确转换", + }, + { + name: "Invalid custom tool - nil Custom field", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + // Custom 为 nil + }, + }, + expectedLen: 0, // 应该被跳过 + description: "Custom字段为nil的custom工具应该被跳过", + }, + { + name: "Invalid custom tool - nil InputSchema", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + Custom: &ClaudeCustomToolSpec{ + Description: "Invalid", + // InputSchema 为 nil + }, + }, + }, + expectedLen: 0, // 应该被跳过 + description: "InputSchema为nil的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildTools(tt.tools) + + if len(result) != tt.expectedLen { + t.Errorf("%s: got %d tool declarations, want %d", tt.description, len(result), tt.expectedLen) + } + + // 验证function declarations存在 + if len(result) > 0 && result[0].FunctionDeclarations != nil { + if len(result[0].FunctionDeclarations) != len(tt.tools) { + t.Errorf("%s: got %d function declarations, want %d", + tt.description, len(result[0].FunctionDeclarations), len(tt.tools)) + } + } + }) + } +} diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go index 97ad6c83..0db3ed4a 100644 --- a/backend/internal/pkg/claude/constants.go +++ b/backend/internal/pkg/claude/constants.go @@ -16,6 +16,12 @@ const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleav // HaikuBetaHeader Haiku 模型使用的 anthropic-beta header(不需要 claude-code beta) const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking +// ApiKeyBetaHeader API-key 账号建议使用的 anthropic-beta header(不包含 oauth) +const ApiKeyBetaHeader = BetaClaudeCode + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming + +// ApiKeyHaikuBetaHeader Haiku 模型在 API-key 账号下使用的 anthropic-beta header(不包含 oauth / claude-code) +const ApiKeyHaikuBetaHeader = BetaInterleavedThinking + // Claude Code 客户端默认请求头 var DefaultHeaders = map[string]string{ "User-Agent": "claude-cli/2.0.62 (external, cli)", diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index ae2976f8..5b3bf565 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -358,6 +358,15 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return nil, fmt.Errorf("transform request: %w", err) } + // 调试:记录转换后的请求体(仅记录前 2000 字符) + if bodyJSON, err := json.Marshal(geminiBody); err == nil { + truncated := string(bodyJSON) + if len(truncated) > 2000 { + truncated = truncated[:2000] + "..." + } + log.Printf("[Debug] Transformed Gemini request: %s", truncated) + } + // 构建上游 action action := "generateContent" if claudeReq.Stream { diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index d542e9c2..5884602d 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/gin-gonic/gin" @@ -684,6 +685,30 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 处理错误响应(不可重试的错误) if resp.StatusCode >= 400 { + // 可选:对部分 400 触发 failover(默认关闭以保持语义) + if resp.StatusCode == 400 && s.cfg != nil && s.cfg.Gateway.FailoverOn400 { + respBody, readErr := io.ReadAll(resp.Body) + if readErr != nil { + // ReadAll failed, fall back to normal error handling without consuming the stream + return s.handleErrorResponse(ctx, resp, c, account) + } + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + + if s.shouldFailoverOn400(respBody) { + if s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Account %d: 400 error, attempting failover: %s", + account.ID, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } else { + log.Printf("Account %d: 400 error, attempting failover", account.ID) + } + s.handleFailoverSideEffects(ctx, resp, account) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + } return s.handleErrorResponse(ctx, resp, c, account) } @@ -786,6 +811,13 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 处理anthropic-beta header(OAuth账号需要特殊处理) if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { + // API-key:仅在请求显式使用 beta 特性且客户端未提供时,按需补齐(默认关闭) + if requestNeedsBetaFeatures(body) { + if beta := defaultApiKeyBetaHeader(body); beta != "" { + req.Header.Set("anthropic-beta", beta) + } + } } return req, nil @@ -838,6 +870,83 @@ func (s *GatewayService) getBetaHeader(modelID string, clientBetaHeader string) return claude.DefaultBetaHeader } +func requestNeedsBetaFeatures(body []byte) bool { + tools := gjson.GetBytes(body, "tools") + if tools.Exists() && tools.IsArray() && len(tools.Array()) > 0 { + return true + } + if strings.EqualFold(gjson.GetBytes(body, "thinking.type").String(), "enabled") { + return true + } + return false +} + +func defaultApiKeyBetaHeader(body []byte) string { + modelID := gjson.GetBytes(body, "model").String() + if strings.Contains(strings.ToLower(modelID), "haiku") { + return claude.ApiKeyHaikuBetaHeader + } + return claude.ApiKeyBetaHeader +} + +func truncateForLog(b []byte, maxBytes int) string { + if maxBytes <= 0 { + maxBytes = 2048 + } + if len(b) > maxBytes { + b = b[:maxBytes] + } + s := string(b) + // 保持一行,避免污染日志格式 + s = strings.ReplaceAll(s, "\n", "\\n") + s = strings.ReplaceAll(s, "\r", "\\r") + return s +} + +func (s *GatewayService) shouldFailoverOn400(respBody []byte) bool { + // 只对“可能是兼容性差异导致”的 400 允许切换,避免无意义重试。 + // 默认保守:无法识别则不切换。 + msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if msg == "" { + return false + } + + // 缺少/错误的 beta header:换账号/链路可能成功(尤其是混合调度时)。 + // 更精确匹配 beta 相关的兼容性问题,避免误触发切换。 + if strings.Contains(msg, "anthropic-beta") || + strings.Contains(msg, "beta feature") || + strings.Contains(msg, "requires beta") { + return true + } + + // thinking/tool streaming 等兼容性约束(常见于中间转换链路) + if strings.Contains(msg, "thinking") || strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") { + return true + } + if strings.Contains(msg, "tool_use") || strings.Contains(msg, "tool_result") || strings.Contains(msg, "tools") { + return true + } + + return false +} + +func extractUpstreamErrorMessage(body []byte) string { + // Claude 风格:{"type":"error","error":{"type":"...","message":"..."}} + if m := gjson.GetBytes(body, "error.message").String(); strings.TrimSpace(m) != "" { + inner := strings.TrimSpace(m) + // 有些上游会把完整 JSON 作为字符串塞进 message + if strings.HasPrefix(inner, "{") { + if innerMsg := gjson.Get(inner, "error.message").String(); strings.TrimSpace(innerMsg) != "" { + return innerMsg + } + } + return m + } + + // 兜底:尝试顶层 message + return gjson.GetBytes(body, "message").String() +} + func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { body, _ := io.ReadAll(resp.Body) @@ -850,6 +959,16 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res switch resp.StatusCode { case 400: + // 仅记录上游错误摘要(避免输出请求内容);需要时可通过配置打开 + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream 400 error (account=%d platform=%s type=%s): %s", + account.ID, + account.Platform, + account.Type, + truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } c.Data(http.StatusBadRequest, "application/json", body) return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) case 401: @@ -1329,6 +1448,18 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, // 标记账号状态(429/529等) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + // 记录上游错误摘要便于排障(不回显请求内容) + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "count_tokens upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + // 返回简化的错误响应 errMsg := "Upstream request failed" switch resp.StatusCode { @@ -1409,6 +1540,13 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // OAuth 账号:处理 anthropic-beta header if tokenType == "oauth" { req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForApiKey && req.Header.Get("anthropic-beta") == "" { + // API-key:与 messages 同步的按需 beta 注入(默认关闭) + if requestNeedsBetaFeatures(body) { + if beta := defaultApiKeyBetaHeader(body); beta != "" { + req.Header.Set("anthropic-beta", beta) + } + } } return req, nil diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index a0bf1b6a..b1877800 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -2278,11 +2278,13 @@ func convertClaudeToolsToGeminiTools(tools any) []any { "properties": map[string]any{}, } } + // 清理 JSON Schema + cleanedParams := cleanToolSchema(params) funcDecls = append(funcDecls, map[string]any{ "name": name, "description": desc, - "parameters": params, + "parameters": cleanedParams, }) } @@ -2296,6 +2298,41 @@ func convertClaudeToolsToGeminiTools(tools any) []any { } } +// cleanToolSchema 清理工具的 JSON Schema,移除 Gemini 不支持的字段 +func cleanToolSchema(schema any) any { + if schema == nil { + return nil + } + + switch v := schema.(type) { + case map[string]any: + cleaned := make(map[string]any) + for key, value := range v { + // 跳过不支持的字段 + if key == "$schema" || key == "$id" || key == "$ref" || + key == "additionalProperties" || key == "minLength" || + key == "maxLength" || key == "minItems" || key == "maxItems" { + continue + } + // 递归清理嵌套对象 + cleaned[key] = cleanToolSchema(value) + } + // 规范化 type 字段为大写 + if typeVal, ok := cleaned["type"].(string); ok { + cleaned["type"] = strings.ToUpper(typeVal) + } + return cleaned + case []any: + cleaned := make([]any, len(v)) + for i, item := range v { + cleaned[i] = cleanToolSchema(item) + } + return cleaned + default: + return v + } +} + func convertClaudeGenerationConfig(req map[string]any) map[string]any { out := make(map[string]any) if mt, ok := asInt(req["max_tokens"]); ok && mt > 0 { diff --git a/backend/internal/service/gemini_messages_compat_service_test.go b/backend/internal/service/gemini_messages_compat_service_test.go new file mode 100644 index 00000000..d49f2eb3 --- /dev/null +++ b/backend/internal/service/gemini_messages_compat_service_test.go @@ -0,0 +1,128 @@ +package service + +import ( + "testing" +) + +// TestConvertClaudeToolsToGeminiTools_CustomType 测试custom类型工具转换 +func TestConvertClaudeToolsToGeminiTools_CustomType(t *testing.T) { + tests := []struct { + name string + tools any + expectedLen int + description string + }{ + { + name: "Standard tools", + tools: []any{ + map[string]any{ + "name": "get_weather", + "description": "Get weather info", + "input_schema": map[string]any{"type": "object"}, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "mcp_tool", + "custom": map[string]any{ + "description": "MCP tool description", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从custom字段读取", + }, + { + name: "Mixed standard and custom tools", + tools: []any{ + map[string]any{ + "name": "standard_tool", + "description": "Standard", + "input_schema": map[string]any{"type": "object"}, + }, + map[string]any{ + "type": "custom", + "name": "custom_tool", + "custom": map[string]any{ + "description": "Custom", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "混合工具应该都能正确转换", + }, + { + name: "Custom tool without custom field", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "invalid_custom", + // 缺少 custom 字段 + }, + }, + expectedLen: 0, // 应该被跳过 + description: "缺少custom字段的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := convertClaudeToolsToGeminiTools(tt.tools) + + if tt.expectedLen == 0 { + if result != nil { + t.Errorf("%s: expected nil result, got %v", tt.description, result) + } + return + } + + if result == nil { + t.Fatalf("%s: expected non-nil result", tt.description) + } + + if len(result) != 1 { + t.Errorf("%s: expected 1 tool declaration, got %d", tt.description, len(result)) + return + } + + toolDecl, ok := result[0].(map[string]any) + if !ok { + t.Fatalf("%s: result[0] is not map[string]any", tt.description) + } + + funcDecls, ok := toolDecl["functionDeclarations"].([]any) + if !ok { + t.Fatalf("%s: functionDeclarations is not []any", tt.description) + } + + toolsArr, _ := tt.tools.([]any) + expectedFuncCount := 0 + for _, tool := range toolsArr { + toolMap, _ := tool.(map[string]any) + if toolMap["name"] != "" { + // 检查是否为有效的custom工具 + if toolMap["type"] == "custom" { + if toolMap["custom"] != nil { + expectedFuncCount++ + } + } else { + expectedFuncCount++ + } + } + } + + if len(funcDecls) != expectedFuncCount { + t.Errorf("%s: expected %d function declarations, got %d", + tt.description, expectedFuncCount, len(funcDecls)) + } + }) + } +} diff --git a/backend/internal/service/gemini_oauth_service.go b/backend/internal/service/gemini_oauth_service.go index e4bda5f8..221bd0f2 100644 --- a/backend/internal/service/gemini_oauth_service.go +++ b/backend/internal/service/gemini_oauth_service.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "regexp" "strconv" "strings" "time" @@ -163,6 +164,45 @@ type GeminiTokenInfo struct { Scope string `json:"scope,omitempty"` ProjectID string `json:"project_id,omitempty"` OAuthType string `json:"oauth_type,omitempty"` // "code_assist" 或 "ai_studio" + TierID string `json:"tier_id,omitempty"` // Gemini Code Assist tier: LEGACY/PRO/ULTRA +} + +// validateTierID validates tier_id format and length +func validateTierID(tierID string) error { + if tierID == "" { + return nil // Empty is allowed + } + if len(tierID) > 64 { + return fmt.Errorf("tier_id exceeds maximum length of 64 characters") + } + // Allow alphanumeric, underscore, hyphen, and slash (for tier paths) + if !regexp.MustCompile(`^[a-zA-Z0-9_/-]+$`).MatchString(tierID) { + return fmt.Errorf("tier_id contains invalid characters") + } + return nil +} + +// extractTierIDFromAllowedTiers extracts tierID from LoadCodeAssist response +// Prioritizes IsDefault tier, falls back to first non-empty tier +func extractTierIDFromAllowedTiers(allowedTiers []geminicli.AllowedTier) string { + tierID := "LEGACY" + // First pass: look for default tier + for _, tier := range allowedTiers { + if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + // Second pass: if still LEGACY, take first non-empty tier + if tierID == "LEGACY" { + for _, tier := range allowedTiers { + if strings.TrimSpace(tier.ID) != "" { + tierID = strings.TrimSpace(tier.ID) + break + } + } + } + return tierID } func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) { @@ -223,13 +263,14 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 projectID := sessionProjectID + var tierID string // 对于 code_assist 模式,project_id 是必需的 // 对于 ai_studio 模式,project_id 是可选的(不影响使用 AI Studio API) if oauthType == "code_assist" { if projectID == "" { var err error - projectID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) + projectID, tierID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL) if err != nil { // 记录警告但不阻断流程,允许后续补充 project_id fmt.Printf("[GeminiOAuth] Warning: Failed to fetch project_id during token exchange: %v\n", err) @@ -248,6 +289,7 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch ExpiresAt: expiresAt, Scope: tokenResp.Scope, ProjectID: projectID, + TierID: tierID, OAuthType: oauthType, }, nil } @@ -357,7 +399,7 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A // For Code Assist, project_id is required. Auto-detect if missing. // For AI Studio OAuth, project_id is optional and should not block refresh. if oauthType == "code_assist" && strings.TrimSpace(tokenInfo.ProjectID) == "" { - projectID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) + projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL) if err != nil { return nil, fmt.Errorf("failed to auto-detect project_id: %w", err) } @@ -366,6 +408,7 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A return nil, fmt.Errorf("failed to auto-detect project_id: empty result") } tokenInfo.ProjectID = projectID + tokenInfo.TierID = tierID } return tokenInfo, nil @@ -388,6 +431,13 @@ func (s *GeminiOAuthService) BuildAccountCredentials(tokenInfo *GeminiTokenInfo) if tokenInfo.ProjectID != "" { creds["project_id"] = tokenInfo.ProjectID } + if tokenInfo.TierID != "" { + // Validate tier_id before storing + if err := validateTierID(tokenInfo.TierID); err == nil { + creds["tier_id"] = tokenInfo.TierID + } + // Silently skip invalid tier_id (don't block account creation) + } if tokenInfo.OAuthType != "" { creds["oauth_type"] = tokenInfo.OAuthType } @@ -398,34 +448,26 @@ func (s *GeminiOAuthService) Stop() { s.sessionStore.Stop() } -func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, error) { +func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, string, error) { if s.codeAssist == nil { - return "", errors.New("code assist client not configured") + return "", "", errors.New("code assist client not configured") } loadResp, loadErr := s.codeAssist.LoadCodeAssist(ctx, accessToken, proxyURL, nil) + + // Extract tierID from response (works whether CloudAICompanionProject is set or not) + tierID := "LEGACY" + if loadResp != nil { + tierID = extractTierIDFromAllowedTiers(loadResp.AllowedTiers) + } + + // If LoadCodeAssist returned a project, use it if loadErr == nil && loadResp != nil && strings.TrimSpace(loadResp.CloudAICompanionProject) != "" { - return strings.TrimSpace(loadResp.CloudAICompanionProject), nil + return strings.TrimSpace(loadResp.CloudAICompanionProject), tierID, nil } // Pick tier from allowedTiers; if no default tier is marked, pick the first non-empty tier ID. - tierID := "LEGACY" - if loadResp != nil { - for _, tier := range loadResp.AllowedTiers { - if tier.IsDefault && strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - if strings.TrimSpace(tierID) == "" || tierID == "LEGACY" { - for _, tier := range loadResp.AllowedTiers { - if strings.TrimSpace(tier.ID) != "" { - tierID = strings.TrimSpace(tier.ID) - break - } - } - } - } + // (tierID already extracted above, reuse it) req := &geminicli.OnboardUserRequest{ TierID: tierID, @@ -443,39 +485,39 @@ func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, pr // If Code Assist onboarding fails (e.g. INVALID_ARGUMENT), fallback to Cloud Resource Manager projects. fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } - return "", err + return "", "", err } if resp.Done { if resp.Response != nil && resp.Response.CloudAICompanionProject != nil { switch v := resp.Response.CloudAICompanionProject.(type) { case string: - return strings.TrimSpace(v), nil + return strings.TrimSpace(v), tierID, nil case map[string]any: if id, ok := v["id"].(string); ok { - return strings.TrimSpace(id), nil + return strings.TrimSpace(id), tierID, nil } } } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } - return "", errors.New("onboardUser completed but no project_id returned") + return "", "", errors.New("onboardUser completed but no project_id returned") } time.Sleep(2 * time.Second) } fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) if fbErr == nil && strings.TrimSpace(fallback) != "" { - return strings.TrimSpace(fallback), nil + return strings.TrimSpace(fallback), tierID, nil } if loadErr != nil { - return "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) + return "", "", fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) } - return "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) + return "", "", fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) } type googleCloudProject struct { diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go index 2195ec55..5f369de5 100644 --- a/backend/internal/service/gemini_token_provider.go +++ b/backend/internal/service/gemini_token_provider.go @@ -112,7 +112,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou } } - detected, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) + detected, tierID, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) if err != nil { log.Printf("[GeminiTokenProvider] Auto-detect project_id failed: %v, fallback to AI Studio API mode", err) return accessToken, nil @@ -123,6 +123,9 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou account.Credentials = make(map[string]any) } account.Credentials["project_id"] = detected + if tierID != "" { + account.Credentials["tier_id"] = tierID + } _ = p.accountRepo.Update(ctx, account) } } diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 5bd85d7d..5478d151 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -122,6 +122,21 @@ pricing: # Hash check interval in minutes hash_check_interval_minutes: 10 +# ============================================================================= +# Gateway (Optional) +# ============================================================================= +gateway: + # Wait time (in seconds) for upstream response headers (streaming body not affected) + response_header_timeout: 300 + # Log upstream error response body summary (safe/truncated; does not log request content) + log_upstream_error_body: false + # Max bytes to log from upstream error body + log_upstream_error_body_max_bytes: 2048 + # Auto inject anthropic-beta for API-key accounts when needed (default off) + inject_beta_for_apikey: false + # Allow failover on selected 400 errors (default off) + failover_on_400: false + # ============================================================================= # Gemini OAuth (Required for Gemini accounts) # ============================================================================= diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 6563ee0c..1770a985 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -952,6 +952,7 @@ "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~6.21.0" } @@ -1367,6 +1368,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1443,6 +1445,7 @@ "resolved": "https://registry.npmmirror.com/chart.js/-/chart.js-4.5.1.tgz", "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", "license": "MIT", + "peer": true, "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -2040,6 +2043,7 @@ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "dev": true, "license": "MIT", + "peer": true, "bin": { "jiti": "bin/jiti.js" } @@ -2348,6 +2352,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -2821,6 +2826,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2854,6 +2860,7 @@ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -2926,6 +2933,7 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -3097,6 +3105,7 @@ "resolved": "https://registry.npmmirror.com/vue/-/vue-3.5.25.tgz", "integrity": "sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==", "license": "MIT", + "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/compiler-sfc": "3.5.25", @@ -3190,6 +3199,7 @@ "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@volar/typescript": "2.4.15", "@vue/language-core": "2.2.12" diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue index c1ca08fa..914678a5 100644 --- a/frontend/src/components/account/AccountStatusIndicator.vue +++ b/frontend/src/components/account/AccountStatusIndicator.vue @@ -83,6 +83,14 @@ > + + + + {{ tierDisplay }} + @@ -140,4 +148,23 @@ const statusText = computed(() => { return props.account.status }) +// Computed: tier display +const tierDisplay = computed(() => { + const credentials = props.account.credentials as Record | undefined + const tierId = credentials?.tier_id + if (!tierId || tierId === 'unknown') return null + + const tierMap: Record = { + 'free': 'Free', + 'payg': 'Pay-as-you-go', + 'pay-as-you-go': 'Pay-as-you-go', + 'enterprise': 'Enterprise', + 'LEGACY': 'Legacy', + 'PRO': 'Pro', + 'ULTRA': 'Ultra' + } + + return tierMap[tierId] || tierId +}) + From 7df914af0619abdf9ccb3598973d89762f8a40a0 Mon Sep 17 00:00:00 2001 From: ianshaw Date: Wed, 31 Dec 2025 21:45:24 -0800 Subject: [PATCH 34/51] =?UTF-8?q?feat(gemini):=20=E6=B7=BB=E5=8A=A0=20Goog?= =?UTF-8?q?le=20One=20=E5=AD=98=E5=82=A8=E7=A9=BA=E9=97=B4=E6=8E=A8?= =?UTF-8?q?=E6=96=AD=20Tier=20=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 功能概述 通过 Google Drive API 获取存储空间配额来推断 Google One 订阅等级,并优化统一的配额显示系统。 ## 后端改动 - 新增 Drive API 客户端 (drive_client.go) - 支持代理和指数退避重试 - 处理 403/429 错误 - 添加 Tier 推断逻辑 (inferGoogleOneTier) - 支持 6 种 tier 类型:AI_PREMIUM, GOOGLE_ONE_STANDARD, GOOGLE_ONE_BASIC, FREE, GOOGLE_ONE_UNKNOWN, GOOGLE_ONE_UNLIMITED - 集成到 OAuth 流程 - ExchangeCode: 授权时自动获取 tier - RefreshAccountToken: Token 刷新时更新 tier (24小时缓存) - 新增管理 API 端点 - POST /api/v1/admin/accounts/:id/refresh-tier (单个账号刷新) - POST /api/v1/admin/accounts/batch-refresh-tier (批量刷新) ## 前端改动 - 更新 AccountQuotaInfo.vue - 添加 Google One tier 标签映射 - 添加 tier 颜色样式 (紫色/蓝色/绿色/灰色/琥珀色) - 更新 AccountUsageCell.vue - 添加 Google One tier 显示逻辑 - 根据 oauth_type 区分显示方式 - 添加国际化翻译 (en.ts, zh.ts) - aiPremium, standard, basic, free, personal, unlimited ## Tier 推断规则 - >= 2TB: AI Premium - >= 200GB: Google One Standard - >= 100GB: Google One Basic - >= 15GB: Free - > 100TB: Unlimited (G Suite legacy) - 其他/失败: Unknown (显示为 Personal) ## 优雅降级 - Drive API 失败时使用 GOOGLE_ONE_UNKNOWN - 不阻断 OAuth 流程 - 24小时缓存避免频繁调用 ## 测试 - ✅ 后端编译成功 - ✅ 前端构建成功 - ✅ 所有代码符合现有规范 --- .../internal/handler/admin/account_handler.go | 165 +++++++++++++ .../handler/admin/gemini_oauth_handler.go | 8 +- .../internal/pkg/geminicli/drive_client.go | 113 +++++++++ backend/internal/server/routes/admin.go | 2 + .../internal/service/gemini_oauth_service.go | 159 ++++++++++++- .../components/account/AccountQuotaInfo.vue | 74 ++++-- .../components/account/AccountUsageCell.vue | 37 +++ .../components/account/CreateAccountModal.vue | 219 +++++++++++------- frontend/src/composables/useGeminiOAuth.ts | 8 +- frontend/src/i18n/locales/en.ts | 8 +- frontend/src/i18n/locales/zh.ts | 8 +- 11 files changed, 691 insertions(+), 110 deletions(-) create mode 100644 backend/internal/pkg/geminicli/drive_client.go diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index ac938f8c..58715706 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -3,6 +3,7 @@ package admin import ( "strconv" "strings" + "time" "github.com/Wei-Shaw/sub2api/internal/handler/dto" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" @@ -989,3 +990,167 @@ func (h *AccountHandler) GetAvailableModels(c *gin.Context) { response.Success(c, models) } + +// RefreshTier handles refreshing Google One tier for a single account +// POST /api/v1/admin/accounts/:id/refresh-tier +func (h *AccountHandler) RefreshTier(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + account, err := h.adminService.GetAccount(c.Request.Context(), accountID) + if err != nil { + response.NotFound(c, "Account not found") + return + } + + if account.Credentials == nil || account.Credentials["oauth_type"] != "google_one" { + response.BadRequest(c, "Account is not a google_one OAuth account") + return + } + + accessToken, ok := account.Credentials["access_token"].(string) + if !ok || accessToken == "" { + response.BadRequest(c, "Missing access_token in credentials") + return + } + + var proxyURL string + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + tierID, storageInfo, err := h.geminiOAuthService.FetchGoogleOneTier(c.Request.Context(), accessToken, proxyURL) + + if account.Extra == nil { + account.Extra = make(map[string]any) + } + if storageInfo != nil { + account.Extra["drive_storage_limit"] = storageInfo.Limit + account.Extra["drive_storage_usage"] = storageInfo.Usage + account.Extra["drive_tier_updated_at"] = timezone.Now().Format(time.RFC3339) + } + account.Credentials["tier_id"] = tierID + + _, updateErr := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ + Credentials: account.Credentials, + Extra: account.Extra, + }) + if updateErr != nil { + response.ErrorFrom(c, updateErr) + return + } + + response.Success(c, gin.H{ + "tier_id": tierID, + "drive_storage_limit": account.Extra["drive_storage_limit"], + "drive_storage_usage": account.Extra["drive_storage_usage"], + "updated_at": account.Extra["drive_tier_updated_at"], + }) +} + +// BatchRefreshTierRequest represents batch tier refresh request +type BatchRefreshTierRequest struct { + AccountIDs []int64 `json:"account_ids"` +} + +// BatchRefreshTier handles batch refreshing Google One tier +// POST /api/v1/admin/accounts/batch-refresh-tier +func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { + var req BatchRefreshTierRequest + if err := c.ShouldBindJSON(&req); err != nil { + req = BatchRefreshTierRequest{} + } + + ctx := c.Request.Context() + var accounts []service.Account + + if len(req.AccountIDs) == 0 { + allAccounts, _, err := h.adminService.ListAccounts(ctx, 1, 10000, "gemini", "oauth", "", "") + if err != nil { + response.ErrorFrom(c, err) + return + } + for _, acc := range allAccounts { + if acc.Credentials != nil && acc.Credentials["oauth_type"] == "google_one" { + accounts = append(accounts, acc) + } + } + } else { + for _, id := range req.AccountIDs { + acc, err := h.adminService.GetAccount(ctx, id) + if err != nil { + continue + } + if acc.Credentials != nil && acc.Credentials["oauth_type"] == "google_one" { + accounts = append(accounts, *acc) + } + } + } + + total := len(accounts) + success := 0 + failed := 0 + errors := []gin.H{} + + for _, account := range accounts { + accessToken, ok := account.Credentials["access_token"].(string) + if !ok || accessToken == "" { + failed++ + errors = append(errors, gin.H{ + "account_id": account.ID, + "error": "missing access_token", + }) + continue + } + + var proxyURL string + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + tierID, storageInfo, err := h.geminiOAuthService.FetchGoogleOneTier(ctx, accessToken, proxyURL) + if err != nil { + failed++ + errors = append(errors, gin.H{ + "account_id": account.ID, + "error": err.Error(), + }) + continue + } + + if account.Extra == nil { + account.Extra = make(map[string]any) + } + if storageInfo != nil { + account.Extra["drive_storage_limit"] = storageInfo.Limit + account.Extra["drive_storage_usage"] = storageInfo.Usage + account.Extra["drive_tier_updated_at"] = timezone.Now().Format(time.RFC3339) + } + account.Credentials["tier_id"] = tierID + + _, updateErr := h.adminService.UpdateAccount(ctx, account.ID, &service.UpdateAccountInput{ + Credentials: account.Credentials, + Extra: account.Extra, + }) + if updateErr != nil { + failed++ + errors = append(errors, gin.H{ + "account_id": account.ID, + "error": updateErr.Error(), + }) + continue + } + + success++ + } + + response.Success(c, gin.H{ + "total": total, + "success": success, + "failed": failed, + "errors": errors, + }) +} diff --git a/backend/internal/handler/admin/gemini_oauth_handler.go b/backend/internal/handler/admin/gemini_oauth_handler.go index 4440aa21..037800e2 100644 --- a/backend/internal/handler/admin/gemini_oauth_handler.go +++ b/backend/internal/handler/admin/gemini_oauth_handler.go @@ -46,8 +46,8 @@ func (h *GeminiOAuthHandler) GenerateAuthURL(c *gin.Context) { if oauthType == "" { oauthType = "code_assist" } - if oauthType != "code_assist" && oauthType != "ai_studio" { - response.BadRequest(c, "Invalid oauth_type: must be 'code_assist' or 'ai_studio'") + if oauthType != "code_assist" && oauthType != "google_one" && oauthType != "ai_studio" { + response.BadRequest(c, "Invalid oauth_type: must be 'code_assist', 'google_one', or 'ai_studio'") return } @@ -92,8 +92,8 @@ func (h *GeminiOAuthHandler) ExchangeCode(c *gin.Context) { if oauthType == "" { oauthType = "code_assist" } - if oauthType != "code_assist" && oauthType != "ai_studio" { - response.BadRequest(c, "Invalid oauth_type: must be 'code_assist' or 'ai_studio'") + if oauthType != "code_assist" && oauthType != "google_one" && oauthType != "ai_studio" { + response.BadRequest(c, "Invalid oauth_type: must be 'code_assist', 'google_one', or 'ai_studio'") return } diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go new file mode 100644 index 00000000..5a959fac --- /dev/null +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -0,0 +1,113 @@ +package geminicli + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" +) + +// DriveStorageInfo represents Google Drive storage quota information +type DriveStorageInfo struct { + Limit int64 `json:"limit"` // Storage limit in bytes + Usage int64 `json:"usage"` // Current usage in bytes +} + +// DriveClient interface for Google Drive API operations +type DriveClient interface { + GetStorageQuota(ctx context.Context, accessToken, proxyURL string) (*DriveStorageInfo, error) +} + +type driveClient struct { + httpClient *http.Client +} + +// NewDriveClient creates a new Drive API client +func NewDriveClient() DriveClient { + return &driveClient{ + httpClient: &http.Client{ + Timeout: 10 * time.Second, + }, + } +} + +// GetStorageQuota fetches storage quota from Google Drive API +func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL string) (*DriveStorageInfo, error) { + const driveAPIURL = "https://www.googleapis.com/drive/v3/about?fields=storageQuota" + + req, err := http.NewRequestWithContext(ctx, "GET", driveAPIURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Get HTTP client with proxy support + client, err := httpclient.GetClient(httpclient.Options{ + ProxyURL: proxyURL, + Timeout: 10 * time.Second, + }) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP client: %w", err) + } + + // Retry logic with exponential backoff for rate limits + var resp *http.Response + maxRetries := 3 + for attempt := 0; attempt < maxRetries; attempt++ { + resp, err = client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + + // Success + if resp.StatusCode == http.StatusOK { + break + } + + // Rate limit - retry with exponential backoff + if resp.StatusCode == http.StatusTooManyRequests && attempt < maxRetries-1 { + resp.Body.Close() + backoff := time.Duration(1< 100*1024*1024*1024*1024 { // > 100TB + return TierGoogleOneUnlimited + } + + // AI Premium (2TB+) + if storageBytes >= 2*1024*1024*1024*1024 { // >= 2TB + return TierAIPremium + } + + // Google One Standard (200GB) + if storageBytes >= 200*1024*1024*1024 { // >= 200GB + return TierGoogleOneStandard + } + + // Google One Basic (100GB) + if storageBytes >= 100*1024*1024*1024 { // >= 100GB + return TierGoogleOneBasic + } + + // Free (15GB) + if storageBytes >= 15*1024*1024*1024 { // >= 15GB + return TierFree + } + + return TierGoogleOneUnknown +} + +// fetchGoogleOneTier fetches Google One tier from Drive API +func (s *GeminiOAuthService) FetchGoogleOneTier(ctx context.Context, accessToken, proxyURL string) (string, *geminicli.DriveStorageInfo, error) { + driveClient := geminicli.NewDriveClient() + + storageInfo, err := driveClient.GetStorageQuota(ctx, accessToken, proxyURL) + if err != nil { + // Check if it's a 403 (scope not granted) + if strings.Contains(err.Error(), "status 403") { + fmt.Printf("[GeminiOAuth] Drive API scope not available: %v\n", err) + return TierGoogleOneUnknown, nil, err + } + // Other errors + fmt.Printf("[GeminiOAuth] Failed to fetch Drive storage: %v\n", err) + return TierGoogleOneUnknown, nil, err + } + + tierID := inferGoogleOneTier(storageInfo.Limit) + return tierID, storageInfo, nil +} + func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) { session, ok := s.sessionStore.Get(input.SessionID) if !ok { @@ -272,7 +337,8 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch projectID := sessionProjectID var tierID string - // 对于 code_assist 模式,project_id 是必需的 + // 对于 code_assist 模式,project_id 是必需的,需要调用 Code Assist API + // 对于 google_one 模式,使用个人 Google 账号,不需要 project_id,配额由 Google 网关自动识别 // 对于 ai_studio 模式,project_id 是可选的(不影响使用 AI Studio API) if oauthType == "code_assist" { if projectID == "" { @@ -298,7 +364,37 @@ func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExch if tierID == "" { tierID = "LEGACY" } + } else if oauthType == "google_one" { + // Attempt to fetch Drive storage tier + tierID, storageInfo, err := s.FetchGoogleOneTier(ctx, tokenResp.AccessToken, proxyURL) + if err != nil { + // Log warning but don't block - use fallback + fmt.Printf("[GeminiOAuth] Warning: Failed to fetch Drive tier: %v\n", err) + tierID = TierGoogleOneUnknown + } + + // Store Drive info in extra field for caching + if storageInfo != nil { + tokenInfo := &GeminiTokenInfo{ + AccessToken: tokenResp.AccessToken, + RefreshToken: tokenResp.RefreshToken, + TokenType: tokenResp.TokenType, + ExpiresIn: tokenResp.ExpiresIn, + ExpiresAt: expiresAt, + Scope: tokenResp.Scope, + ProjectID: projectID, + TierID: tierID, + OAuthType: oauthType, + Extra: map[string]any{ + "drive_storage_limit": storageInfo.Limit, + "drive_storage_usage": storageInfo.Usage, + "drive_tier_updated_at": time.Now().Format(time.RFC3339), + }, + } + return tokenInfo, nil + } } + // ai_studio 模式不设置 tierID,保持为空 return &GeminiTokenInfo{ AccessToken: tokenResp.AccessToken, @@ -455,6 +551,41 @@ func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *A if strings.TrimSpace(tokenInfo.ProjectID) == "" { return nil, fmt.Errorf("failed to auto-detect project_id: empty result") } + } else if oauthType == "google_one" { + // Check if tier cache is stale (> 24 hours) + needsRefresh := true + if account.Extra != nil { + if updatedAtStr, ok := account.Extra["drive_tier_updated_at"].(string); ok { + if updatedAt, err := time.Parse(time.RFC3339, updatedAtStr); err == nil { + if time.Since(updatedAt) <= 24*time.Hour { + needsRefresh = false + // Use cached tier + if existingTierID != "" { + tokenInfo.TierID = existingTierID + } + } + } + } + } + + if needsRefresh { + tierID, storageInfo, err := s.FetchGoogleOneTier(ctx, tokenInfo.AccessToken, proxyURL) + if err == nil && storageInfo != nil { + tokenInfo.TierID = tierID + tokenInfo.Extra = map[string]any{ + "drive_storage_limit": storageInfo.Limit, + "drive_storage_usage": storageInfo.Usage, + "drive_tier_updated_at": time.Now().Format(time.RFC3339), + } + } else { + // Fallback to cached or unknown + if existingTierID != "" { + tokenInfo.TierID = existingTierID + } else { + tokenInfo.TierID = TierGoogleOneUnknown + } + } + } } return tokenInfo, nil @@ -487,6 +618,12 @@ func (s *GeminiOAuthService) BuildAccountCredentials(tokenInfo *GeminiTokenInfo) if tokenInfo.OAuthType != "" { creds["oauth_type"] = tokenInfo.OAuthType } + // Store extra metadata (Drive info) if present + if len(tokenInfo.Extra) > 0 { + for k, v := range tokenInfo.Extra { + creds[k] = v + } + } return creds } diff --git a/frontend/src/components/account/AccountQuotaInfo.vue b/frontend/src/components/account/AccountQuotaInfo.vue index c20d685d..512b4451 100644 --- a/frontend/src/components/account/AccountQuotaInfo.vue +++ b/frontend/src/components/account/AccountQuotaInfo.vue @@ -48,6 +48,12 @@ const isCodeAssist = computed(() => { return creds?.oauth_type === 'code_assist' || (!creds?.oauth_type && !!creds?.project_id) }) +// 是否为 Google One OAuth +const isGoogleOne = computed(() => { + const creds = props.account.credentials as GeminiCredentials | undefined + return creds?.oauth_type === 'google_one' +}) + // 是否应该显示配额信息 const shouldShowQuota = computed(() => { return props.account.platform === 'gemini' @@ -55,33 +61,73 @@ const shouldShowQuota = computed(() => { // Tier 标签文本 const tierLabel = computed(() => { + const creds = props.account.credentials as GeminiCredentials | undefined + if (isCodeAssist.value) { - const creds = props.account.credentials as GeminiCredentials | undefined + // GCP Code Assist: 显示 GCP tier const tierMap: Record = { LEGACY: 'Free', PRO: 'Pro', - ULTRA: 'Ultra' + ULTRA: 'Ultra', + 'standard-tier': 'Standard', + 'pro-tier': 'Pro', + 'ultra-tier': 'Ultra' } - return tierMap[creds?.tier_id || ''] || 'Unknown' + return tierMap[creds?.tier_id || ''] || (creds?.tier_id ? 'GCP' : 'Unknown') } + + if (isGoogleOne.value) { + // Google One: tier 映射 + const tierMap: Record = { + AI_PREMIUM: 'AI Premium', + GOOGLE_ONE_STANDARD: 'Standard', + GOOGLE_ONE_BASIC: 'Basic', + FREE: 'Free', + GOOGLE_ONE_UNKNOWN: 'Personal', + GOOGLE_ONE_UNLIMITED: 'Unlimited' + } + return tierMap[creds?.tier_id || ''] || 'Personal' + } + + // AI Studio 或其他 return 'Gemini' }) // Tier Badge 样式 const tierBadgeClass = computed(() => { - if (!isCodeAssist.value) { - return 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400' - } const creds = props.account.credentials as GeminiCredentials | undefined - const tierColorMap: Record = { - LEGACY: 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400', - PRO: 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400', - ULTRA: 'bg-amber-100 text-amber-700 dark:bg-amber-900/30 dark:text-amber-400' + + if (isCodeAssist.value) { + // GCP Code Assist 样式 + const tierColorMap: Record = { + LEGACY: 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400', + PRO: 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400', + ULTRA: 'bg-amber-100 text-amber-700 dark:bg-amber-900/30 dark:text-amber-400', + 'standard-tier': 'bg-green-100 text-green-700 dark:bg-green-900/30 dark:text-green-400', + 'pro-tier': 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400', + 'ultra-tier': 'bg-amber-100 text-amber-700 dark:bg-amber-900/30 dark:text-amber-400' + } + return ( + tierColorMap[creds?.tier_id || ''] || + 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400' + ) } - return ( - tierColorMap[creds?.tier_id || ''] || - 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400' - ) + + if (isGoogleOne.value) { + // Google One tier 样式 + const tierColorMap: Record = { + AI_PREMIUM: 'bg-purple-100 text-purple-700 dark:bg-purple-900/30 dark:text-purple-400', + GOOGLE_ONE_STANDARD: 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400', + GOOGLE_ONE_BASIC: 'bg-green-100 text-green-700 dark:bg-green-900/30 dark:text-green-400', + FREE: 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400', + GOOGLE_ONE_UNKNOWN: 'bg-gray-100 text-gray-700 dark:bg-gray-900/30 dark:text-gray-400', + GOOGLE_ONE_UNLIMITED: 'bg-amber-100 text-amber-700 dark:bg-amber-900/30 dark:text-amber-400' + } + return tierColorMap[creds?.tier_id || ''] || 'bg-purple-100 text-purple-700 dark:bg-purple-900/30 dark:text-purple-400' + } + + // AI Studio 默认样式:蓝色 + return 'bg-blue-100 text-blue-700 dark:bg-blue-900/30 dark:text-blue-400' }) // 是否限流 diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue index e743c1d2..8dfb9f38 100644 --- a/frontend/src/components/account/AccountUsageCell.vue +++ b/frontend/src/components/account/AccountUsageCell.vue @@ -568,6 +568,24 @@ const isGeminiCodeAssist = computed(() => { // Gemini 账户类型显示标签 const geminiTierLabel = computed(() => { if (!geminiTier.value) return null + + const creds = props.account.credentials as GeminiCredentials | undefined + const isGoogleOne = creds?.oauth_type === 'google_one' + + if (isGoogleOne) { + // Google One tier 标签 + const tierMap: Record = { + AI_PREMIUM: t('admin.accounts.tier.aiPremium'), + GOOGLE_ONE_STANDARD: t('admin.accounts.tier.standard'), + GOOGLE_ONE_BASIC: t('admin.accounts.tier.basic'), + FREE: t('admin.accounts.tier.free'), + GOOGLE_ONE_UNKNOWN: t('admin.accounts.tier.personal'), + GOOGLE_ONE_UNLIMITED: t('admin.accounts.tier.unlimited') + } + return tierMap[geminiTier.value] || t('admin.accounts.tier.personal') + } + + // Code Assist tier 标签 const tierMap: Record = { LEGACY: t('admin.accounts.tier.free'), PRO: t('admin.accounts.tier.pro'), @@ -578,6 +596,25 @@ const geminiTierLabel = computed(() => { // Gemini 账户类型徽章样式 const geminiTierClass = computed(() => { + if (!geminiTier.value) return '' + + const creds = props.account.credentials as GeminiCredentials | undefined + const isGoogleOne = creds?.oauth_type === 'google_one' + + if (isGoogleOne) { + // Google One tier 颜色 + const colorMap: Record = { + AI_PREMIUM: 'bg-purple-100 text-purple-600 dark:bg-purple-900/40 dark:text-purple-300', + GOOGLE_ONE_STANDARD: 'bg-blue-100 text-blue-600 dark:bg-blue-900/40 dark:text-blue-300', + GOOGLE_ONE_BASIC: 'bg-green-100 text-green-600 dark:bg-green-900/40 dark:text-green-300', + FREE: 'bg-gray-100 text-gray-600 dark:bg-gray-700 dark:text-gray-300', + GOOGLE_ONE_UNKNOWN: 'bg-gray-100 text-gray-600 dark:bg-gray-700 dark:text-gray-300', + GOOGLE_ONE_UNLIMITED: 'bg-amber-100 text-amber-600 dark:bg-amber-900/40 dark:text-amber-300' + } + return colorMap[geminiTier.value] || 'bg-gray-100 text-gray-600 dark:bg-gray-700 dark:text-gray-300' + } + + // Code Assist tier 颜色 switch (geminiTier.value) { case 'LEGACY': return 'bg-gray-100 text-gray-600 dark:bg-gray-700 dark:text-gray-300' diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 7cb54dc0..0f188e08 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -455,6 +455,52 @@
+ + + + +
-
- +
+ + +
+ - -
- {{ t('admin.accounts.oauth.gemini.aiStudioNotConfiguredTip') }} + +
+
+ + {{ t('admin.accounts.gemini.oauthType.customTitle') }} + + + {{ t('admin.accounts.gemini.oauthType.customDesc') }} + +
+ {{ t('admin.accounts.gemini.oauthType.customRequirement') }} +
+
+ + {{ t('admin.accounts.gemini.oauthType.badges.orgManaged') }} + + + {{ t('admin.accounts.gemini.oauthType.badges.adminRequired') }} + +
+
+ + {{ t('admin.accounts.oauth.gemini.aiStudioNotConfiguredShort') }} + + + +
+ {{ t('admin.accounts.oauth.gemini.aiStudioNotConfiguredTip') }}
@@ -1610,8 +1672,9 @@ const selectedErrorCodes = ref([]) const customErrorCodeInput = ref(null) const interceptWarmupRequests = ref(false) const mixedScheduling = ref(false) // For antigravity accounts: enable mixed scheduling -const geminiOAuthType = ref<'code_assist' | 'ai_studio'>('code_assist') +const geminiOAuthType = ref<'code_assist' | 'google_one' | 'ai_studio'>('google_one') const geminiAIStudioOAuthEnabled = ref(false) +const showAdvancedOAuth = ref(false) // Common models for whitelist - Anthropic const anthropicModels = [ @@ -1902,7 +1965,7 @@ watch( { immediate: true } ) -const handleSelectGeminiOAuthType = (oauthType: 'code_assist' | 'ai_studio') => { +const handleSelectGeminiOAuthType = (oauthType: 'code_assist' | 'google_one' | 'ai_studio') => { if (oauthType === 'ai_studio' && !geminiAIStudioOAuthEnabled.value) { appStore.showError(t('admin.accounts.oauth.gemini.aiStudioNotConfigured')) return diff --git a/frontend/src/composables/useGeminiOAuth.ts b/frontend/src/composables/useGeminiOAuth.ts index fb20cc2f..14920417 100644 --- a/frontend/src/composables/useGeminiOAuth.ts +++ b/frontend/src/composables/useGeminiOAuth.ts @@ -93,7 +93,13 @@ export function useGeminiOAuth() { const tokenInfo = await adminAPI.gemini.exchangeCode(payload as any) return tokenInfo as GeminiTokenInfo } catch (err: any) { - error.value = err.response?.data?.detail || t('admin.accounts.oauth.gemini.failedToExchangeCode') + // Check for specific missing project_id error + const errorMessage = err.message || err.response?.data?.message || '' + if (errorMessage.includes('missing project_id')) { + error.value = t('admin.accounts.oauth.gemini.missingProjectId') + } else { + error.value = errorMessage || t('admin.accounts.oauth.gemini.failedToExchangeCode') + } appStore.showError(error.value) return null } finally { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index ac958590..33789586 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1076,6 +1076,7 @@ export default { failedToGenerateUrl: 'Failed to generate Gemini auth URL', missingExchangeParams: 'Missing auth code, session ID, or state', failedToExchangeCode: 'Failed to exchange Gemini auth code', + missingProjectId: 'GCP Project ID retrieval failed: Your Google account is not linked to an active GCP project. Please activate GCP and bind a credit card in Google Cloud Console, or manually enter the Project ID during authorization.', modelPassthrough: 'Gemini Model Passthrough', modelPassthroughDesc: 'All model requests are forwarded directly to the Gemini API without model restrictions or mappings.', @@ -1290,7 +1291,12 @@ export default { tier: { free: 'Free', pro: 'Pro', - ultra: 'Ultra' + ultra: 'Ultra', + aiPremium: 'AI Premium', + standard: 'Standard', + basic: 'Basic', + personal: 'Personal', + unlimited: 'Unlimited' }, ineligibleWarning: 'This account is not eligible for Antigravity, but API forwarding still works. Use at your own risk.' diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index b4de5ada..45d1a9a8 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -996,7 +996,12 @@ export default { tier: { free: 'Free', pro: 'Pro', - ultra: 'Ultra' + ultra: 'Ultra', + aiPremium: 'AI Premium', + standard: '标准版', + basic: '基础版', + personal: '个人版', + unlimited: '无限制' }, ineligibleWarning: '该账号无 Antigravity 使用权限,但仍能进行 API 转发。继续使用请自行承担风险。', @@ -1215,6 +1220,7 @@ export default { failedToGenerateUrl: '生成 Gemini 授权链接失败', missingExchangeParams: '缺少 code / session_id / state', failedToExchangeCode: 'Gemini 授权码兑换失败', + missingProjectId: 'GCP Project ID 获取失败:您的 Google 账号未关联有效的 GCP 项目。请前往 Google Cloud Console 激活 GCP 并绑定信用卡,或在授权时手动填写 Project ID。', modelPassthrough: 'Gemini 直接转发模型', modelPassthroughDesc: '所有模型请求将直接转发至 Gemini API,不进行模型限制或映射。', stateWarningTitle: '提示', From 34bbfb5dd2059f57049fbce72bec5645171d1262 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 14:07:37 +0800 Subject: [PATCH 35/51] =?UTF-8?q?fix(lint):=20=E4=BF=AE=E5=A4=8D=20golangc?= =?UTF-8?q?i-lint=20=E6=A3=80=E6=9F=A5=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 修复未检查的错误返回值 (errcheck) - 移除未使用的 httpClient 字段 (unused) - 修复低效赋值问题 (ineffassign) - 使用 switch 替代 if-else 链 (staticcheck QF1003) - 修复错误字符串首字母大写问题 (staticcheck ST1005) - 运行 gofmt 格式化代码 --- .../internal/handler/admin/account_handler.go | 12 ++++++---- .../internal/pkg/geminicli/drive_client.go | 22 +++++++------------ .../internal/service/gemini_oauth_service.go | 20 +++++++++-------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 58715706..78b71431 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -1023,6 +1023,10 @@ func (h *AccountHandler) RefreshTier(c *gin.Context) { } tierID, storageInfo, err := h.geminiOAuthService.FetchGoogleOneTier(c.Request.Context(), accessToken, proxyURL) + if err != nil { + response.ErrorFrom(c, err) + return + } if account.Extra == nil { account.Extra = make(map[string]any) @@ -1044,10 +1048,10 @@ func (h *AccountHandler) RefreshTier(c *gin.Context) { } response.Success(c, gin.H{ - "tier_id": tierID, - "drive_storage_limit": account.Extra["drive_storage_limit"], - "drive_storage_usage": account.Extra["drive_storage_usage"], - "updated_at": account.Extra["drive_tier_updated_at"], + "tier_id": tierID, + "drive_storage_limit": account.Extra["drive_storage_limit"], + "drive_storage_usage": account.Extra["drive_storage_usage"], + "updated_at": account.Extra["drive_tier_updated_at"], }) } diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go index 5a959fac..79d6835f 100644 --- a/backend/internal/pkg/geminicli/drive_client.go +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -22,17 +22,11 @@ type DriveClient interface { GetStorageQuota(ctx context.Context, accessToken, proxyURL string) (*DriveStorageInfo, error) } -type driveClient struct { - httpClient *http.Client -} +type driveClient struct{} // NewDriveClient creates a new Drive API client func NewDriveClient() DriveClient { - return &driveClient{ - httpClient: &http.Client{ - Timeout: 10 * time.Second, - }, - } + return &driveClient{} } // GetStorageQuota fetches storage quota from Google Drive API @@ -71,7 +65,7 @@ func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL // Rate limit - retry with exponential backoff if resp.StatusCode == http.StatusTooManyRequests && attempt < maxRetries-1 { - resp.Body.Close() + _ = resp.Body.Close() backoff := time.Duration(1< 24 hours) needsRefresh := true if account.Extra != nil { From 48764e15a5fb460415c43ca77c6681dfd01bf480 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 15:07:16 +0800 Subject: [PATCH 36/51] =?UTF-8?q?test(gemini):=20=E6=B7=BB=E5=8A=A0=20Driv?= =?UTF-8?q?e=20API=20=E5=92=8C=20OAuth=20=E6=9C=8D=E5=8A=A1=E5=8D=95?= =?UTF-8?q?=E5=85=83=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 drive_client_test.go:Drive API 客户端单元测试 - 新增 gemini_oauth_service_test.go:OAuth 服务单元测试 - 重构 account_handler.go:改进 RefreshTier API 实现 - 优化 drive_client.go:增强错误处理和重试逻辑 - 完善 repository 和 service 层:支持批量 tier 刷新 - 更新迁移文件编号:017 -> 024(避免冲突) --- .../internal/handler/admin/account_handler.go | 175 +++++++++--------- .../internal/pkg/geminicli/drive_client.go | 67 ++++++- .../pkg/geminicli/drive_client_test.go | 19 ++ backend/internal/repository/account_repo.go | 84 +++++++++ backend/internal/service/account_service.go | 3 + backend/internal/service/admin_service.go | 14 ++ .../internal/service/gemini_oauth_service.go | 85 +++++++-- .../service/gemini_oauth_service_test.go | 52 ++++++ ...tier_id.sql => 024_add_gemini_tier_id.sql} | 2 +- 9 files changed, 383 insertions(+), 118 deletions(-) create mode 100644 backend/internal/pkg/geminicli/drive_client_test.go create mode 100644 backend/internal/service/gemini_oauth_service_test.go rename backend/migrations/{017_add_gemini_tier_id.sql => 024_add_gemini_tier_id.sql} (94%) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 78b71431..af1e7d91 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -3,7 +3,7 @@ package admin import ( "strconv" "strings" - "time" + "sync" "github.com/Wei-Shaw/sub2api/internal/handler/dto" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" @@ -14,6 +14,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" ) // OAuthHandler handles OAuth-related operations for accounts @@ -1000,47 +1001,33 @@ func (h *AccountHandler) RefreshTier(c *gin.Context) { return } - account, err := h.adminService.GetAccount(c.Request.Context(), accountID) + ctx := c.Request.Context() + account, err := h.adminService.GetAccount(ctx, accountID) if err != nil { response.NotFound(c, "Account not found") return } - if account.Credentials == nil || account.Credentials["oauth_type"] != "google_one" { - response.BadRequest(c, "Account is not a google_one OAuth account") + if account.Platform != service.PlatformGemini || account.Type != service.AccountTypeOAuth { + response.BadRequest(c, "Only Gemini OAuth accounts support tier refresh") return } - accessToken, ok := account.Credentials["access_token"].(string) - if !ok || accessToken == "" { - response.BadRequest(c, "Missing access_token in credentials") + oauthType, _ := account.Credentials["oauth_type"].(string) + if oauthType != "google_one" { + response.BadRequest(c, "Only google_one OAuth accounts support tier refresh") return } - var proxyURL string - if account.ProxyID != nil && account.Proxy != nil { - proxyURL = account.Proxy.URL() - } - - tierID, storageInfo, err := h.geminiOAuthService.FetchGoogleOneTier(c.Request.Context(), accessToken, proxyURL) + tierID, extra, creds, err := h.geminiOAuthService.RefreshAccountGoogleOneTier(ctx, account) if err != nil { response.ErrorFrom(c, err) return } - if account.Extra == nil { - account.Extra = make(map[string]any) - } - if storageInfo != nil { - account.Extra["drive_storage_limit"] = storageInfo.Limit - account.Extra["drive_storage_usage"] = storageInfo.Usage - account.Extra["drive_tier_updated_at"] = timezone.Now().Format(time.RFC3339) - } - account.Credentials["tier_id"] = tierID - - _, updateErr := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ - Credentials: account.Credentials, - Extra: account.Extra, + _, updateErr := h.adminService.UpdateAccount(ctx, accountID, &service.UpdateAccountInput{ + Credentials: creds, + Extra: extra, }) if updateErr != nil { response.ErrorFrom(c, updateErr) @@ -1049,9 +1036,10 @@ func (h *AccountHandler) RefreshTier(c *gin.Context) { response.Success(c, gin.H{ "tier_id": tierID, - "drive_storage_limit": account.Extra["drive_storage_limit"], - "drive_storage_usage": account.Extra["drive_storage_usage"], - "updated_at": account.Extra["drive_tier_updated_at"], + "storage_info": extra, + "drive_storage_limit": extra["drive_storage_limit"], + "drive_storage_usage": extra["drive_storage_usage"], + "updated_at": extra["drive_tier_updated_at"], }) } @@ -1069,7 +1057,7 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { } ctx := c.Request.Context() - var accounts []service.Account + accounts := make([]*service.Account, 0) if len(req.AccountIDs) == 0 { allAccounts, _, err := h.adminService.ListAccounts(ctx, 1, 10000, "gemini", "oauth", "", "") @@ -1077,84 +1065,87 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { response.ErrorFrom(c, err) return } - for _, acc := range allAccounts { - if acc.Credentials != nil && acc.Credentials["oauth_type"] == "google_one" { + for i := range allAccounts { + acc := &allAccounts[i] + oauthType, _ := acc.Credentials["oauth_type"].(string) + if oauthType == "google_one" { accounts = append(accounts, acc) } } } else { - for _, id := range req.AccountIDs { - acc, err := h.adminService.GetAccount(ctx, id) - if err != nil { + fetched, err := h.adminService.GetAccountsByIDs(ctx, req.AccountIDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + + for _, acc := range fetched { + if acc == nil { continue } - if acc.Credentials != nil && acc.Credentials["oauth_type"] == "google_one" { - accounts = append(accounts, *acc) + if acc.Platform != service.PlatformGemini || acc.Type != service.AccountTypeOAuth { + continue } + oauthType, _ := acc.Credentials["oauth_type"].(string) + if oauthType != "google_one" { + continue + } + accounts = append(accounts, acc) } } - total := len(accounts) - success := 0 - failed := 0 - errors := []gin.H{} + const maxConcurrency = 10 + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(maxConcurrency) + + var mu sync.Mutex + results := gin.H{ + "total": len(accounts), + "success": 0, + "failed": 0, + "errors": []gin.H{}, + } for _, account := range accounts { - accessToken, ok := account.Credentials["access_token"].(string) - if !ok || accessToken == "" { - failed++ - errors = append(errors, gin.H{ - "account_id": account.ID, - "error": "missing access_token", + acc := account // 闭包捕获 + g.Go(func() error { + _, extra, creds, err := h.geminiOAuthService.RefreshAccountGoogleOneTier(gctx, acc) + if err != nil { + mu.Lock() + results["failed"] = results["failed"].(int) + 1 + results["errors"] = append(results["errors"].([]gin.H), gin.H{ + "account_id": acc.ID, + "error": err.Error(), + }) + mu.Unlock() + return nil + } + + _, updateErr := h.adminService.UpdateAccount(gctx, acc.ID, &service.UpdateAccountInput{ + Credentials: creds, + Extra: extra, }) - continue - } - var proxyURL string - if account.ProxyID != nil && account.Proxy != nil { - proxyURL = account.Proxy.URL() - } + mu.Lock() + if updateErr != nil { + results["failed"] = results["failed"].(int) + 1 + results["errors"] = append(results["errors"].([]gin.H), gin.H{ + "account_id": acc.ID, + "error": updateErr.Error(), + }) + } else { + results["success"] = results["success"].(int) + 1 + } + mu.Unlock() - tierID, storageInfo, err := h.geminiOAuthService.FetchGoogleOneTier(ctx, accessToken, proxyURL) - if err != nil { - failed++ - errors = append(errors, gin.H{ - "account_id": account.ID, - "error": err.Error(), - }) - continue - } - - if account.Extra == nil { - account.Extra = make(map[string]any) - } - if storageInfo != nil { - account.Extra["drive_storage_limit"] = storageInfo.Limit - account.Extra["drive_storage_usage"] = storageInfo.Usage - account.Extra["drive_tier_updated_at"] = timezone.Now().Format(time.RFC3339) - } - account.Credentials["tier_id"] = tierID - - _, updateErr := h.adminService.UpdateAccount(ctx, account.ID, &service.UpdateAccountInput{ - Credentials: account.Credentials, - Extra: account.Extra, + return nil }) - if updateErr != nil { - failed++ - errors = append(errors, gin.H{ - "account_id": account.ID, - "error": updateErr.Error(), - }) - continue - } - - success++ } - response.Success(c, gin.H{ - "total": total, - "success": success, - "failed": failed, - "errors": errors, - }) + if err := g.Wait(); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, results) } diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go index 79d6835f..77e2c476 100644 --- a/backend/internal/pkg/geminicli/drive_client.go +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -5,7 +5,9 @@ import ( "encoding/json" "fmt" "io" + "math/rand" "net/http" + "strconv" "time" "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" @@ -49,13 +51,38 @@ func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL return nil, fmt.Errorf("failed to create HTTP client: %w", err) } - // Retry logic with exponential backoff for rate limits + sleepWithContext := func(d time.Duration) error { + timer := time.NewTimer(d) + defer timer.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + } + } + + // Retry logic with exponential backoff (+ jitter) for rate limits and transient failures var resp *http.Response maxRetries := 3 + rng := rand.New(rand.NewSource(time.Now().UnixNano())) for attempt := 0; attempt < maxRetries; attempt++ { + if ctx.Err() != nil { + return nil, fmt.Errorf("request cancelled: %w", ctx.Err()) + } + resp, err = client.Do(req) if err != nil { - return nil, fmt.Errorf("failed to execute request: %w", err) + // Network error retry + if attempt < maxRetries-1 { + backoff := time.Duration(1< 100*1024*1024*1024*1024 { // > 100TB + if storageBytes > StorageTierUnlimited { return TierGoogleOneUnlimited } - - // AI Premium (2TB+) - if storageBytes >= 2*1024*1024*1024*1024 { // >= 2TB + if storageBytes >= StorageTierAIPremium { return TierAIPremium } - - // Google One Standard (200GB) - if storageBytes >= 200*1024*1024*1024 { // >= 200GB + if storageBytes >= StorageTierStandard { return TierGoogleOneStandard } - - // Google One Basic (100GB) - if storageBytes >= 100*1024*1024*1024 { // >= 100GB + if storageBytes >= StorageTierBasic { return TierGoogleOneBasic } - - // Free (15GB) - if storageBytes >= 15*1024*1024*1024 { // >= 15GB + if storageBytes >= StorageTierFree { return TierFree } - return TierGoogleOneUnknown } @@ -270,6 +271,60 @@ func (s *GeminiOAuthService) FetchGoogleOneTier(ctx context.Context, accessToken return tierID, storageInfo, nil } +// RefreshAccountGoogleOneTier 刷新单个账号的 Google One Tier +func (s *GeminiOAuthService) RefreshAccountGoogleOneTier( + ctx context.Context, + account *Account, +) (tierID string, extra map[string]any, credentials map[string]any, err error) { + if account == nil { + return "", nil, nil, fmt.Errorf("account is nil") + } + + // 验证账号类型 + oauthType, ok := account.Credentials["oauth_type"].(string) + if !ok || oauthType != "google_one" { + return "", nil, nil, fmt.Errorf("not a google_one OAuth account") + } + + // 获取 access_token + accessToken, ok := account.Credentials["access_token"].(string) + if !ok || accessToken == "" { + return "", nil, nil, fmt.Errorf("missing access_token") + } + + // 获取 proxy URL + var proxyURL string + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // 调用 Drive API + tierID, storageInfo, err := s.FetchGoogleOneTier(ctx, accessToken, proxyURL) + if err != nil { + return "", nil, nil, err + } + + // 构建 extra 数据(保留原有 extra 字段) + extra = make(map[string]any) + for k, v := range account.Extra { + extra[k] = v + } + if storageInfo != nil { + extra["drive_storage_limit"] = storageInfo.Limit + extra["drive_storage_usage"] = storageInfo.Usage + extra["drive_tier_updated_at"] = time.Now().Format(time.RFC3339) + } + + // 构建 credentials 数据 + credentials = make(map[string]any) + for k, v := range account.Credentials { + credentials[k] = v + } + credentials["tier_id"] = tierID + + return tierID, extra, credentials, nil +} + func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) { session, ok := s.sessionStore.Get(input.SessionID) if !ok { diff --git a/backend/internal/service/gemini_oauth_service_test.go b/backend/internal/service/gemini_oauth_service_test.go new file mode 100644 index 00000000..393812c2 --- /dev/null +++ b/backend/internal/service/gemini_oauth_service_test.go @@ -0,0 +1,52 @@ +package service + +import "testing" + +func TestInferGoogleOneTier(t *testing.T) { + tests := []struct { + name string + storageBytes int64 + expectedTier string + }{ + {"Negative storage", -1, TierGoogleOneUnknown}, + {"Zero storage", 0, TierGoogleOneUnknown}, + + // Free tier boundary (15GB) + {"Below free tier", 10 * GB, TierGoogleOneUnknown}, + {"Just below free tier", StorageTierFree - 1, TierGoogleOneUnknown}, + {"Free tier (15GB)", StorageTierFree, TierFree}, + + // Basic tier boundary (100GB) + {"Between free and basic", 50 * GB, TierFree}, + {"Just below basic tier", StorageTierBasic - 1, TierFree}, + {"Basic tier (100GB)", StorageTierBasic, TierGoogleOneBasic}, + + // Standard tier boundary (200GB) + {"Between basic and standard", 150 * GB, TierGoogleOneBasic}, + {"Just below standard tier", StorageTierStandard - 1, TierGoogleOneBasic}, + {"Standard tier (200GB)", StorageTierStandard, TierGoogleOneStandard}, + + // AI Premium tier boundary (2TB) + {"Between standard and premium", 1 * TB, TierGoogleOneStandard}, + {"Just below AI Premium tier", StorageTierAIPremium - 1, TierGoogleOneStandard}, + {"AI Premium tier (2TB)", StorageTierAIPremium, TierAIPremium}, + + // Unlimited tier boundary (> 100TB) + {"Between premium and unlimited", 50 * TB, TierAIPremium}, + {"At unlimited threshold (100TB)", StorageTierUnlimited, TierAIPremium}, + {"Unlimited tier (100TB+)", StorageTierUnlimited + 1, TierGoogleOneUnlimited}, + {"Unlimited tier (101TB+)", 101 * TB, TierGoogleOneUnlimited}, + {"Very large storage", 1000 * TB, TierGoogleOneUnlimited}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := inferGoogleOneTier(tt.storageBytes) + if result != tt.expectedTier { + t.Errorf("inferGoogleOneTier(%d) = %s, want %s", + tt.storageBytes, result, tt.expectedTier) + } + }) + } +} + diff --git a/backend/migrations/017_add_gemini_tier_id.sql b/backend/migrations/024_add_gemini_tier_id.sql similarity index 94% rename from backend/migrations/017_add_gemini_tier_id.sql rename to backend/migrations/024_add_gemini_tier_id.sql index 0388a412..d9ac7afe 100644 --- a/backend/migrations/017_add_gemini_tier_id.sql +++ b/backend/migrations/024_add_gemini_tier_id.sql @@ -26,5 +26,5 @@ UPDATE accounts SET credentials = credentials - 'tier_id' WHERE platform = 'gemini' AND type = 'oauth' - AND credentials->>'oauth_type' = 'code_assist'; + AND credentials ? 'tier_id'; -- +goose StatementEnd From c63192fcb55db34b9ac5e670f34eb3cd23ddcb71 Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 15:16:12 +0800 Subject: [PATCH 37/51] =?UTF-8?q?fix(test):=20=E4=BF=AE=E5=A4=8D=20CI=20?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E5=92=8C=20lint=20=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 为所有 mock 实现添加 GetByIDs 方法以满足 AccountRepository 接口 - 重构 account_handler.go 中的类型断言,使用类型安全的变量 - 修复 gofmt 格式问题 --- .../internal/handler/admin/account_handler.go | 25 +++++++++++-------- .../internal/pkg/geminicli/drive_client.go | 9 ++++--- .../pkg/geminicli/drive_client_test.go | 1 - .../service/account_service_delete_test.go | 4 +++ .../service/gateway_multiplatform_test.go | 10 ++++++++ .../service/gemini_multiplatform_test.go | 10 ++++++++ .../service/gemini_oauth_service_test.go | 1 - 7 files changed, 43 insertions(+), 17 deletions(-) diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index af1e7d91..f2d8a287 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -1099,12 +1099,8 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { g.SetLimit(maxConcurrency) var mu sync.Mutex - results := gin.H{ - "total": len(accounts), - "success": 0, - "failed": 0, - "errors": []gin.H{}, - } + var successCount, failedCount int + var errors []gin.H for _, account := range accounts { acc := account // 闭包捕获 @@ -1112,8 +1108,8 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { _, extra, creds, err := h.geminiOAuthService.RefreshAccountGoogleOneTier(gctx, acc) if err != nil { mu.Lock() - results["failed"] = results["failed"].(int) + 1 - results["errors"] = append(results["errors"].([]gin.H), gin.H{ + failedCount++ + errors = append(errors, gin.H{ "account_id": acc.ID, "error": err.Error(), }) @@ -1128,13 +1124,13 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { mu.Lock() if updateErr != nil { - results["failed"] = results["failed"].(int) + 1 - results["errors"] = append(results["errors"].([]gin.H), gin.H{ + failedCount++ + errors = append(errors, gin.H{ "account_id": acc.ID, "error": updateErr.Error(), }) } else { - results["success"] = results["success"].(int) + 1 + successCount++ } mu.Unlock() @@ -1147,5 +1143,12 @@ func (h *AccountHandler) BatchRefreshTier(c *gin.Context) { return } + results := gin.H{ + "total": len(accounts), + "success": successCount, + "failed": failedCount, + "errors": errors, + } + response.Success(c, results) } diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go index 77e2c476..8f9c745f 100644 --- a/backend/internal/pkg/geminicli/drive_client.go +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io" "math/rand" "net/http" "strconv" @@ -112,10 +111,12 @@ func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL } if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) _ = resp.Body.Close() - // 记录完整错误 - fmt.Printf("[DriveClient] API error (status %d): %s\n", resp.StatusCode, string(body)) + statusText := http.StatusText(resp.StatusCode) + if statusText == "" { + statusText = resp.Status + } + fmt.Printf("[DriveClient] Drive API error: status=%d, msg=%s\n", resp.StatusCode, statusText) // 只返回通用错误 return nil, fmt.Errorf("drive API error: status %d", resp.StatusCode) } diff --git a/backend/internal/pkg/geminicli/drive_client_test.go b/backend/internal/pkg/geminicli/drive_client_test.go index d2c7f25b..b6dd1a69 100644 --- a/backend/internal/pkg/geminicli/drive_client_test.go +++ b/backend/internal/pkg/geminicli/drive_client_test.go @@ -16,4 +16,3 @@ func TestDriveStorageInfo(t *testing.T) { t.Errorf("Expected usage 50GB, got %d", info.Usage) } } - diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index 2648b828..43703763 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -40,6 +40,10 @@ func (s *accountRepoStub) GetByID(ctx context.Context, id int64) (*Account, erro panic("unexpected GetByID call") } +func (s *accountRepoStub) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + panic("unexpected GetByIDs call") +} + // ExistsByID 返回预设的存在性检查结果。 // 这是 Delete 方法调用的第一个仓储方法,用于验证账号是否存在。 func (s *accountRepoStub) ExistsByID(ctx context.Context, id int64) (bool, error) { diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 560c7767..808a48b2 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -32,6 +32,16 @@ func (m *mockAccountRepoForPlatform) GetByID(ctx context.Context, id int64) (*Ac return nil, errors.New("account not found") } +func (m *mockAccountRepoForPlatform) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + var result []*Account + for _, id := range ids { + if acc, ok := m.accountsByID[id]; ok { + result = append(result, acc) + } + } + return result, nil +} + func (m *mockAccountRepoForPlatform) ExistsByID(ctx context.Context, id int64) (bool, error) { if m.accountsByID == nil { return false, nil diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index dcc945eb..6ca5052e 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -25,6 +25,16 @@ func (m *mockAccountRepoForGemini) GetByID(ctx context.Context, id int64) (*Acco return nil, errors.New("account not found") } +func (m *mockAccountRepoForGemini) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + var result []*Account + for _, id := range ids { + if acc, ok := m.accountsByID[id]; ok { + result = append(result, acc) + } + } + return result, nil +} + func (m *mockAccountRepoForGemini) ExistsByID(ctx context.Context, id int64) (bool, error) { if m.accountsByID == nil { return false, nil diff --git a/backend/internal/service/gemini_oauth_service_test.go b/backend/internal/service/gemini_oauth_service_test.go index 393812c2..026e6dc2 100644 --- a/backend/internal/service/gemini_oauth_service_test.go +++ b/backend/internal/service/gemini_oauth_service_test.go @@ -49,4 +49,3 @@ func TestInferGoogleOneTier(t *testing.T) { }) } } - From 1d5e05b8cadcab00b6d02b4c9cad5e8b168e1fbb Mon Sep 17 00:00:00 2001 From: IanShaw027 <131567472+IanShaw027@users.noreply.github.com> Date: Thu, 1 Jan 2026 15:35:08 +0800 Subject: [PATCH 38/51] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20P0=20?= =?UTF-8?q?=E5=AE=89=E5=85=A8=E5=92=8C=E5=B9=B6=E5=8F=91=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 修复敏感信息泄露:移除 Drive API 完整响应体打印,只记录状态码 - 修复并发安全问题:升级为 RWMutex,读写分离提升性能 - 修复资源泄漏风险:使用 defer 确保 resp.Body 正确关闭 --- backend/internal/pkg/geminicli/drive_client.go | 10 ++++++---- backend/internal/service/ratelimit_service.go | 6 +++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go index 8f9c745f..a6cbc3ab 100644 --- a/backend/internal/pkg/geminicli/drive_client.go +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -94,10 +94,12 @@ func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL resp.StatusCode == http.StatusInternalServerError || resp.StatusCode == http.StatusBadGateway || resp.StatusCode == http.StatusServiceUnavailable) && attempt < maxRetries-1 { - _ = resp.Body.Close() - backoff := time.Duration(1< Date: Thu, 1 Jan 2026 16:03:48 +0800 Subject: [PATCH 39/51] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E7=99=BD=E5=90=8D=E5=8D=95=E9=80=89=E6=8B=A9=E5=99=A8?= =?UTF-8?q?=E7=BB=84=E4=BB=B6=EF=BC=8C=E5=90=8C=E6=AD=A5=20new-api=20?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E5=88=97=E8=A1=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增 ModelWhitelistSelector.vue 支持模型白名单多选 - 新增 ModelIcon.vue 显示品牌图标(基于 @lobehub/icons) - 新增 useModelWhitelist.ts 硬编码各平台模型列表 - 更新账号编辑表单支持模型白名单配置 - 支持 Claude/OpenAI/Gemini/智谱/百度/讯飞等主流平台 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- backend/cmd/server/wire_gen.go | 8 +- backend/internal/handler/gateway_handler.go | 37 +- backend/internal/service/gateway_service.go | 55 + frontend/package-lock.json | 7564 ++++++++++++++++- frontend/package.json | 2 + frontend/pnpm-lock.yaml | 99 +- .../components/account/CreateAccountModal.vue | 261 +- .../components/account/EditAccountModal.vue | 244 +- .../account/ModelWhitelistSelector.vue | 201 + frontend/src/components/common/ModelIcon.vue | 278 + frontend/src/composables/useModelWhitelist.ts | 299 + frontend/src/i18n/locales/en.ts | 9 + frontend/src/i18n/locales/zh.ts | 9 + frontend/tsconfig.json | 3 +- 14 files changed, 8562 insertions(+), 507 deletions(-) create mode 100644 frontend/src/components/account/ModelWhitelistSelector.vue create mode 100644 frontend/src/components/common/ModelIcon.vue create mode 100644 frontend/src/composables/useModelWhitelist.ts diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 79827b26..7bcfb899 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -114,15 +114,15 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { gitHubReleaseClient := repository.NewGitHubReleaseClient() serviceBuildInfo := provideServiceBuildInfo(buildInfo) updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) - systemHandler := handler.ProvideSystemHandler(updateService) - adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) - adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) pricingRemoteClient := repository.NewPricingRemoteClient() pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) if err != nil { return nil, err } + systemHandler := handler.ProvideSystemHandler(updateService) + adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) + adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) billingService := service.NewBillingService(configConfig, pricingService) identityCache := repository.NewIdentityCache(redisClient) identityService := service.NewIdentityService(identityCache) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index bf179ea1..b34782d7 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -300,12 +300,42 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // Models handles listing available models // GET /v1/models -// Returns different model lists based on the API key's group platform +// Returns models based on account configurations (model_mapping whitelist) +// Falls back to default models if no whitelist is configured func (h *GatewayHandler) Models(c *gin.Context) { apiKey, _ := middleware2.GetApiKeyFromContext(c) - // Return OpenAI models for OpenAI platform groups - if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform == "openai" { + var groupID *int64 + var platform string + + if apiKey != nil && apiKey.Group != nil { + groupID = &apiKey.Group.ID + platform = apiKey.Group.Platform + } + + // Get available models from account configurations (without platform filter) + availableModels := h.gatewayService.GetAvailableModels(c.Request.Context(), groupID, "") + + if len(availableModels) > 0 { + // Build model list from whitelist + models := make([]claude.Model, 0, len(availableModels)) + for _, modelID := range availableModels { + models = append(models, claude.Model{ + ID: modelID, + Type: "model", + DisplayName: modelID, + CreatedAt: "2024-01-01T00:00:00Z", + }) + } + c.JSON(http.StatusOK, gin.H{ + "object": "list", + "data": models, + }) + return + } + + // Fallback to default models + if platform == "openai" { c.JSON(http.StatusOK, gin.H{ "object": "list", "data": openai.DefaultModels, @@ -313,7 +343,6 @@ func (h *GatewayHandler) Models(c *gin.Context) { return } - // Default: Claude models c.JSON(http.StatusOK, gin.H{ "object": "list", "data": claude.DefaultModels, diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 50bfd161..b33e3283 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -1440,3 +1440,58 @@ func (s *GatewayService) countTokensError(c *gin.Context, status int, errType, m }, }) } + +// GetAvailableModels returns the list of models available for a group +// It aggregates model_mapping keys from all schedulable accounts in the group +func (s *GatewayService) GetAvailableModels(ctx context.Context, groupID *int64, platform string) []string { + var accounts []Account + var err error + + if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupID(ctx, *groupID) + } else { + accounts, err = s.accountRepo.ListSchedulable(ctx) + } + + if err != nil || len(accounts) == 0 { + return nil + } + + // Filter by platform if specified + if platform != "" { + filtered := make([]Account, 0) + for _, acc := range accounts { + if acc.Platform == platform { + filtered = append(filtered, acc) + } + } + accounts = filtered + } + + // Collect unique models from all accounts + modelSet := make(map[string]struct{}) + hasAnyMapping := false + + for _, acc := range accounts { + mapping := acc.GetModelMapping() + if len(mapping) > 0 { + hasAnyMapping = true + for model := range mapping { + modelSet[model] = struct{}{} + } + } + } + + // If no account has model_mapping, return nil (use default) + if !hasAnyMapping { + return nil + } + + // Convert to slice + models := make([]string, 0, len(modelSet)) + for model := range modelSet { + models = append(models, model) + } + + return models +} diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 6563ee0c..0fab353c 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -8,6 +8,7 @@ "name": "sub2api-frontend", "version": "1.0.0", "dependencies": { + "@lobehub/icons": "^4.0.2", "@vueuse/core": "^10.7.0", "axios": "^1.6.2", "chart.js": "^4.4.1", @@ -22,6 +23,7 @@ }, "devDependencies": { "@types/file-saver": "^2.0.7", + "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", "@vitejs/plugin-vue": "^5.2.3", "autoprefixer": "^10.4.16", @@ -46,11 +48,116 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@ant-design/colors": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@ant-design/colors/-/colors-8.0.0.tgz", + "integrity": "sha512-6YzkKCw30EI/E9kHOIXsQDHmMvTllT8STzjMb4K2qzit33RW2pqCJP0sk+hidBntXxE+Vz4n1+RvCTfBw6OErw==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^3.0.0" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-2.0.1.tgz", + "integrity": "sha512-Lw1Z4cUQxdMmTNir67gU0HCpTl5TtkKCJPZ6UBvCqzcOTl/QmMFB6qAEoj8qFl0CuZDX9qQYa3m9+rEKfaBSbA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1", + "csstype": "^3.1.3", + "stylis": "^4.3.4" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/cssinjs-utils": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs-utils/-/cssinjs-utils-2.0.2.tgz", + "integrity": "sha512-Mq3Hm6fJuQeFNKSp3+yT4bjuhVbdrsyXE2RyfpJFL0xiYNZdaJ6oFaE3zFrzmHbmvTd2Wp3HCbRtkD4fU+v2ZA==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^2.0.1", + "@babel/runtime": "^7.23.2", + "@rc-component/util": "^1.4.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + } + }, + "node_modules/@ant-design/fast-color": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@ant-design/fast-color/-/fast-color-3.0.0.tgz", + "integrity": "sha512-eqvpP7xEDm2S7dUzl5srEQCBTXZMmY3ekf97zI+M2DHOYyKdJGH0qua0JACHTqbkRnD/KHFQP9J1uMJ/XWVzzA==", + "license": "MIT", + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@ant-design/icons": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@ant-design/icons/-/icons-6.1.0.tgz", + "integrity": "sha512-KrWMu1fIg3w/1F2zfn+JlfNDU8dDqILfA5Tg85iqs1lf8ooyGlbkA+TkwfOKKgqpUmAiRY1PTFpuOU2DAIgSUg==", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^8.0.0", + "@ant-design/icons-svg": "^4.4.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@ant-design/icons-svg/-/icons-svg-4.4.2.tgz", + "integrity": "sha512-vHbT+zJEVzllwP+CM+ul7reTEfBR0vgxFe7+lREAsAA7YGsYpboiq2sQNeQeRvh09GfQgs/GyFEvZpJ9cLXpXA==", + "license": "MIT" + }, + "node_modules/@ant-design/react-slick": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@ant-design/react-slick/-/react-slick-2.0.0.tgz", + "integrity": "sha512-HMS9sRoEmZey8LsE/Yo6+klhlzU12PisjrVcydW3So7RdklyEd2qehyU6a7Yp+OYN72mgsYs3NFCyP2lCPFVqg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.4", + "clsx": "^2.1.1", + "json2mq": "^0.2.0", + "throttle-debounce": "^5.0.0" + }, + "peerDependencies": { + "react": "^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@antfu/install-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-1.1.0.tgz", + "integrity": "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==", + "license": "MIT", + "dependencies": { + "package-manager-detector": "^1.3.0", + "tinyexec": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, "node_modules/@babel/code-frame": { "version": "7.27.1", "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.27.1.tgz", "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", - "dev": true, "license": "MIT", "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", @@ -61,6 +168,44 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-string-parser": { "version": "7.27.1", "resolved": "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", @@ -94,6 +239,47 @@ "node": ">=6.0.0" } }, + "node_modules/@babel/runtime": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/types": { "version": "7.28.5", "resolved": "https://registry.npmmirror.com/@babel/types/-/types-7.28.5.tgz", @@ -107,6 +293,377 @@ "node": ">=6.9.0" } }, + "node_modules/@base-ui/react": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@base-ui/react/-/react-1.0.0.tgz", + "integrity": "sha512-4USBWz++DUSLTuIYpbYkSgy1F9ZmNG9S/lXvlUN6qMK0P0RlW+6eQmDUB4DgZ7HVvtXl4pvi4z5J2fv6Z3+9hg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.4", + "@base-ui/utils": "0.2.3", + "@floating-ui/react-dom": "^2.1.6", + "@floating-ui/utils": "^0.2.10", + "reselect": "^5.1.1", + "tabbable": "^6.3.0", + "use-sync-external-store": "^1.6.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mui-org" + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@base-ui/utils": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@base-ui/utils/-/utils-0.2.3.tgz", + "integrity": "sha512-/CguQ2PDaOzeVOkllQR8nocJ0FFIDqsWIcURsVmm53QGo8NhFNpePjNlyPIB41luxfOqnG7PU0xicMEw3ls7XQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.4", + "@floating-ui/utils": "^0.2.10", + "reselect": "^5.1.1", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.1.tgz", + "integrity": "sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==", + "license": "MIT" + }, + "node_modules/@chevrotain/cst-dts-gen": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.0.3.tgz", + "integrity": "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/gast": "11.0.3", + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/cst-dts-gen/node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/@chevrotain/gast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.0.3.tgz", + "integrity": "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/gast/node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/@chevrotain/regexp-to-ast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.0.3.tgz", + "integrity": "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/types": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.0.3.tgz", + "integrity": "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/utils": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.0.3.tgz", + "integrity": "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==", + "license": "Apache-2.0" + }, + "node_modules/@dnd-kit/accessibility": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz", + "integrity": "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/core": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@dnd-kit/core/-/core-6.3.1.tgz", + "integrity": "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@dnd-kit/accessibility": "^3.1.1", + "@dnd-kit/utilities": "^3.2.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/modifiers": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/@dnd-kit/modifiers/-/modifiers-9.0.0.tgz", + "integrity": "sha512-ybiLc66qRGuZoC20wdSSG6pDXFikui/dCNGthxv4Ndy8ylErY0N3KVxY2bgo7AWwIbxDmXDg3ylAFmnrjcbVvw==", + "license": "MIT", + "dependencies": { + "@dnd-kit/utilities": "^3.2.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "@dnd-kit/core": "^6.3.0", + "react": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/sortable": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@dnd-kit/sortable/-/sortable-10.0.0.tgz", + "integrity": "sha512-+xqhmIIzvAYMGfBYYnbKuNicfSsk4RksY2XdmJhT+HAC01nix6fHCztU68jooFiMUB01Ky3F0FyOvhG/BZrWkg==", + "license": "MIT", + "dependencies": { + "@dnd-kit/utilities": "^3.2.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "@dnd-kit/core": "^6.3.0", + "react": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/utilities": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@dnd-kit/utilities/-/utilities-3.2.2.tgz", + "integrity": "sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@emoji-mart/data": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emoji-mart/data/-/data-1.2.1.tgz", + "integrity": "sha512-no2pQMWiBy6gpBEiqGeU77/bFejDqUTRY7KX+0+iur13op3bqUsXdnwoZs6Xb1zbv0gAj5VvS1PWoUUckSr5Dw==", + "license": "MIT" + }, + "node_modules/@emoji-mart/react": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@emoji-mart/react/-/react-1.1.1.tgz", + "integrity": "sha512-NMlFNeWgv1//uPsvLxvGQoIerPuVdXwK/EUek8OOkJ6wVOWPUizRBJU0hDqWZCOROVpfBgCemaC3m6jDOXi03g==", + "license": "MIT", + "peerDependencies": { + "emoji-mart": "^5.2", + "react": "^16.8 || ^17 || ^18" + } + }, + "node_modules/@emotion/babel-plugin": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz", + "integrity": "sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.16.7", + "@babel/runtime": "^7.18.3", + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/serialize": "^1.3.3", + "babel-plugin-macros": "^3.1.0", + "convert-source-map": "^1.5.0", + "escape-string-regexp": "^4.0.0", + "find-root": "^1.1.0", + "source-map": "^0.5.7", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/babel-plugin/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/babel-plugin/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@emotion/babel-plugin/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + "node_modules/@emotion/cache": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz", + "integrity": "sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==", + "license": "MIT", + "dependencies": { + "@emotion/memoize": "^0.9.0", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/cache/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + "node_modules/@emotion/css": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/css/-/css-11.13.5.tgz", + "integrity": "sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==", + "license": "MIT", + "dependencies": { + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.13.5", + "@emotion/serialize": "^1.3.3", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", + "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", + "license": "MIT" + }, + "node_modules/@emotion/is-prop-valid": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.4.0.tgz", + "integrity": "sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@emotion/memoize": "^0.9.0" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz", + "integrity": "sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==", + "license": "MIT" + }, + "node_modules/@emotion/react": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz", + "integrity": "sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.14.0", + "@emotion/serialize": "^1.3.3", + "@emotion/use-insertion-effect-with-fallbacks": "^1.2.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "hoist-non-react-statics": "^3.3.1" + }, + "peerDependencies": { + "react": ">=16.8.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@emotion/serialize": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz", + "integrity": "sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==", + "license": "MIT", + "dependencies": { + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/unitless": "^0.10.0", + "@emotion/utils": "^1.4.2", + "csstype": "^3.0.2" + } + }, + "node_modules/@emotion/serialize/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/serialize/node_modules/@emotion/unitless": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz", + "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==", + "license": "MIT" + }, + "node_modules/@emotion/sheet": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz", + "integrity": "sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", + "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", + "license": "MIT" + }, + "node_modules/@emotion/use-insertion-effect-with-fallbacks": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz", + "integrity": "sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==", + "license": "MIT", + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@emotion/utils": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz", + "integrity": "sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==", + "license": "MIT" + }, + "node_modules/@emotion/weak-memoize": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz", + "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", + "license": "MIT" + }, "node_modules/@esbuild/aix-ppc64": { "version": "0.21.5", "resolved": "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", @@ -498,6 +1055,88 @@ "node": ">=12" } }, + "node_modules/@floating-ui/core": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react": { + "version": "0.27.16", + "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.27.16.tgz", + "integrity": "sha512-9O8N4SeG2z++TSM8QA/KTeKFBVCNEz/AGS7gWPJf6KFRzmRWixFRnCnkPHRDwSVZW6QPDO6uT0P2SpWNKCc9/g==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.1.6", + "@floating-ui/utils": "^0.2.10", + "tabbable": "^6.0.0" + }, + "peerDependencies": { + "react": ">=17.0.0", + "react-dom": ">=17.0.0" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.4" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@giscus/react": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@giscus/react/-/react-3.1.0.tgz", + "integrity": "sha512-0TCO2TvL43+oOdyVVGHDItwxD1UMKP2ZYpT6gXmhFOqfAJtZxTzJ9hkn34iAF/b6YzyJ4Um89QIt9z/ajmAEeg==", + "dependencies": { + "giscus": "^1.6.0" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18 || ^19", + "react-dom": "^16 || ^17 || ^18 || ^19" + } + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", + "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", + "license": "MIT" + }, + "node_modules/@iconify/utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-3.1.0.tgz", + "integrity": "sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==", + "license": "MIT", + "dependencies": { + "@antfu/install-pkg": "^1.1.0", + "@iconify/types": "^2.0.0", + "mlly": "^1.8.0" + } + }, "node_modules/@intlify/core-base": { "version": "9.14.5", "resolved": "https://registry.npmmirror.com/@intlify/core-base/-/core-base-9.14.5.tgz", @@ -546,7 +1185,6 @@ "version": "0.3.13", "resolved": "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", - "dev": true, "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", @@ -557,7 +1195,6 @@ "version": "3.1.2", "resolved": "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "dev": true, "license": "MIT", "engines": { "node": ">=6.0.0" @@ -573,7 +1210,6 @@ "version": "0.3.31", "resolved": "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", - "dev": true, "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -586,6 +1222,229 @@ "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", "license": "MIT" }, + "node_modules/@lit-labs/ssr-dom-shim": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@lit-labs/ssr-dom-shim/-/ssr-dom-shim-1.5.0.tgz", + "integrity": "sha512-HLomZXMmrCFHSRKESF5vklAKsDY7/fsT/ZhqCu3V0UoW/Qbv8wxmO4W9bx4KnCCF2Zak4yuk+AGraK/bPmI4kA==", + "license": "BSD-3-Clause" + }, + "node_modules/@lit/reactive-element": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@lit/reactive-element/-/reactive-element-2.1.2.tgz", + "integrity": "sha512-pbCDiVMnne1lYUIaYNN5wrwQXDtHaYtg7YEFPeW+hws6U47WeFvISGUWekPGKWOP1ygrs0ef0o1VJMk1exos5A==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.5.0" + } + }, + "node_modules/@lobehub/emojilib": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@lobehub/emojilib/-/emojilib-1.0.0.tgz", + "integrity": "sha512-s9KnjaPjsEefaNv150G3aifvB+J3P4eEKG+epY9zDPS2BeB6+V2jELWqAZll+nkogMaVovjEE813z3V751QwGw==", + "license": "MIT" + }, + "node_modules/@lobehub/fluent-emoji": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@lobehub/fluent-emoji/-/fluent-emoji-4.1.0.tgz", + "integrity": "sha512-R1MB2lfUkDvB7XAQdRzY75c1dx/tB7gEvBPaEEMarzKfCJWmXm7rheS6caVzmgwAlq5sfmTbxPL+un99sp//Yw==", + "license": "MIT", + "peer": true, + "dependencies": { + "@lobehub/emojilib": "^1.0.0", + "antd-style": "^4.1.0", + "emoji-regex": "^10.6.0", + "es-toolkit": "^1.43.0", + "lucide-react": "^0.562.0", + "url-join": "^5.0.0" + }, + "peerDependencies": { + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@lobehub/icons": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@lobehub/icons/-/icons-4.0.2.tgz", + "integrity": "sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==", + "license": "MIT", + "peer": true, + "workspaces": [ + "packages/*" + ], + "dependencies": { + "antd-style": "^4.1.0", + "lucide-react": "^0.469.0", + "polished": "^4.3.1" + }, + "peerDependencies": { + "@lobehub/ui": "^4.3.3", + "antd": "^6.1.1", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@lobehub/icons/node_modules/lucide-react": { + "version": "0.469.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.469.0.tgz", + "integrity": "sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@lobehub/ui": { + "version": "4.6.3", + "resolved": "https://registry.npmjs.org/@lobehub/ui/-/ui-4.6.3.tgz", + "integrity": "sha512-1roaNTgLGLDOsfoa7nNlmvE+F8OMIDCvprkudE9Ci/SgTzJmtQCV+jD3rDnedJRZ73cSOPgqkm/O7f+mthwEDA==", + "license": "MIT", + "peer": true, + "dependencies": { + "@ant-design/cssinjs": "^2.0.1", + "@base-ui/react": "^1.0.0", + "@dnd-kit/core": "^6.3.1", + "@dnd-kit/modifiers": "^9.0.0", + "@dnd-kit/sortable": "^10.0.0", + "@dnd-kit/utilities": "^3.2.2", + "@emoji-mart/data": "^1.2.1", + "@emoji-mart/react": "^1.1.1", + "@emotion/is-prop-valid": "^1.4.0", + "@floating-ui/react": "^0.27.16", + "@giscus/react": "^3.1.0", + "@mdx-js/mdx": "^3.1.1", + "@mdx-js/react": "^3.1.1", + "@radix-ui/react-slot": "^1.2.4", + "@shikijs/core": "^3.20.0", + "@shikijs/transformers": "^3.20.0", + "@splinetool/runtime": "0.9.526", + "ahooks": "^3.9.6", + "antd-style": "^4.1.0", + "chroma-js": "^3.2.0", + "class-variance-authority": "^0.7.1", + "dayjs": "^1.11.19", + "emoji-mart": "^5.6.0", + "es-toolkit": "^1.32.0", + "fast-deep-equal": "^3.1.3", + "immer": "^11.0.1", + "katex": "^0.16.27", + "leva": "^0.10.1", + "lucide-react": "^0.562.0", + "marked": "^17.0.1", + "mermaid": "^11.12.2", + "motion": "^12.23.26", + "numeral": "^2.0.6", + "polished": "^4.3.1", + "query-string": "^9.3.1", + "rc-collapse": "^4.0.0", + "rc-footer": "^0.6.8", + "rc-image": "^7.12.0", + "rc-input-number": "^9.5.0", + "rc-menu": "^9.16.1", + "re-resizable": "^6.11.2", + "react-avatar-editor": "^14.0.0", + "react-error-boundary": "^6.0.0", + "react-hotkeys-hook": "^5.2.1", + "react-markdown": "^10.1.0", + "react-merge-refs": "^3.0.2", + "react-rnd": "^10.5.2", + "react-zoom-pan-pinch": "^3.7.0", + "rehype-github-alerts": "^4.2.0", + "rehype-katex": "^7.0.1", + "rehype-raw": "^7.0.0", + "remark-breaks": "^4.0.0", + "remark-cjk-friendly": "^1.2.3", + "remark-gfm": "^4.0.1", + "remark-github": "^12.0.0", + "remark-math": "^6.0.0", + "shiki": "^3.20.0", + "shiki-stream": "^0.1.3", + "swr": "^2.3.8", + "ts-md5": "^2.0.1", + "unified": "^11.0.5", + "url-join": "^5.0.0", + "use-merge-value": "^1.2.0", + "uuid": "^13.0.0" + }, + "peerDependencies": { + "@lobehub/fluent-emoji": "^4.0.0", + "@lobehub/icons": "^4.0.0", + "antd": "^6.1.1", + "motion": "^12.0.0", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@mdx-js/mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.1.tgz", + "integrity": "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "acorn": "^8.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/mdx/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.1.tgz", + "integrity": "sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==", + "license": "MIT", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@mermaid-js/parser": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.6.3.tgz", + "integrity": "sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==", + "license": "MIT", + "dependencies": { + "langium": "3.3.1" + } + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -624,6 +1483,1366 @@ "node": ">= 8" } }, + "node_modules/@primer/octicons": { + "version": "19.21.1", + "resolved": "https://registry.npmjs.org/@primer/octicons/-/octicons-19.21.1.tgz", + "integrity": "sha512-7tgtBkCNcg75YJnckinzvES+uxysYQCe+CHSEnzr3VYgxttzKRvfmrnVogl3aEuHCQP4xhiE9k2lFDhYwGtTzQ==", + "license": "MIT", + "dependencies": { + "object-assign": "^4.1.1" + } + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.10.tgz", + "integrity": "sha512-4kY9IVa6+9nJPsYmngK5Uk2kUmZnv7ChhHAFeQ5oaj8jrR1bIi3xww8nH71pz1/Ve4d/cXO3YxT8eikt1B0a8w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", + "integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@rc-component/async-validator": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@rc-component/async-validator/-/async-validator-5.0.4.tgz", + "integrity": "sha512-qgGdcVIF604M9EqjNF0hbUTz42bz/RDtxWdWuU5EQe3hi7M8ob54B6B35rOsvX5eSvIHIzT9iH1R3n+hk3CGfg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.4" + }, + "engines": { + "node": ">=14.x" + } + }, + "node_modules/@rc-component/cascader": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@rc-component/cascader/-/cascader-1.10.0.tgz", + "integrity": "sha512-D1XOKvbhdo9kX+cG1p8qJOnSq+sMK3L84iVYjGQIx950kJt0ixN+Xac75ykyK/AC8V3GUanjNK14Qkv149RrEw==", + "license": "MIT", + "dependencies": { + "@rc-component/select": "~1.4.0", + "@rc-component/tree": "~1.1.0", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/checkbox": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/checkbox/-/checkbox-1.0.1.tgz", + "integrity": "sha512-08yTH8m+bSm8TOqbybbJ9KiAuIATti6bDs2mVeSfu4QfEnyeF6X0enHVvD1NEAyuBWEAo56QtLe++MYs2D9XiQ==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/collapse": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/collapse/-/collapse-1.1.2.tgz", + "integrity": "sha512-ilBYk1dLLJHu5Q74dF28vwtKUYQ42ZXIIDmqTuVy4rD8JQVvkXOs+KixVNbweyuIEtJYJ7+t+9GVD9dPc6N02w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/motion": "^1.1.4", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/color-picker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@rc-component/color-picker/-/color-picker-3.0.3.tgz", + "integrity": "sha512-V7gFF9O7o5XwIWafdbOtqI4BUUkEUkgdBwp6favy3xajMX/2dDqytFaiXlcwrpq6aRyPLp5dKLAG5RFKLXMeGA==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^3.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/context": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/context/-/context-2.0.1.tgz", + "integrity": "sha512-HyZbYm47s/YqtP6pKXNMjPEMaukyg7P0qVfgMLzr7YiFNMHbK2fKTAGzms9ykfGHSfyf75nBbgWw+hHkp+VImw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/dialog": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@rc-component/dialog/-/dialog-1.5.1.tgz", + "integrity": "sha512-by4Sf/a3azcb89WayWuwG19/Y312xtu8N81HoVQQtnsBDylfs+dog98fTAvLinnpeoWG52m/M7QLRW6fXR3l1g==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.1.3", + "@rc-component/portal": "^2.0.0", + "@rc-component/util": "^1.0.1", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/drawer": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@rc-component/drawer/-/drawer-1.3.0.tgz", + "integrity": "sha512-rE+sdXEmv2W25VBQ9daGbnb4J4hBIEKmdbj0b3xpY+K7TUmLXDIlSnoXraIbFZdGyek9WxxGKK887uRnFgI+pQ==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.1.4", + "@rc-component/portal": "^2.0.0", + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/dropdown": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@rc-component/dropdown/-/dropdown-1.0.2.tgz", + "integrity": "sha512-6PY2ecUSYhDPhkNHHb4wfeAya04WhpmUSKzdR60G+kMNVUCX2vjT/AgTS0Lz0I/K6xrPMJ3enQbwVpeN3sHCgg==", + "license": "MIT", + "dependencies": { + "@rc-component/trigger": "^3.0.0", + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.11.0", + "react-dom": ">=16.11.0" + } + }, + "node_modules/@rc-component/form": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@rc-component/form/-/form-1.6.0.tgz", + "integrity": "sha512-A7vrN8kExtw4sW06mrsgCb1rowhvBFFvQU6Bk/NL0Fj6Wet/5GF0QnGCxBu/sG3JI9FEhsJWES0D44BW2d0hzg==", + "license": "MIT", + "dependencies": { + "@rc-component/async-validator": "^5.0.3", + "@rc-component/util": "^1.5.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/image": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@rc-component/image/-/image-1.5.3.tgz", + "integrity": "sha512-/NR7QW9uCN8Ugar+xsHZOPvzPySfEhcW2/vLcr7VPRM+THZMrllMRv7LAUgW7ikR+Z67Ab67cgPp5K5YftpJsQ==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.0.0", + "@rc-component/portal": "^2.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/input": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/input/-/input-1.1.2.tgz", + "integrity": "sha512-Q61IMR47piUBudgixJ30CciKIy9b1H95qe7GgEKOmSJVJXvFRWJllJfQry9tif+MX2cWFXWJf/RXz4kaCeq/Fg==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@rc-component/input-number": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/@rc-component/input-number/-/input-number-1.6.2.tgz", + "integrity": "sha512-Gjcq7meZlCOiWN1t1xCC+7/s85humHVokTBI7PJgTfoyw5OWF74y3e6P8PHX104g9+b54jsodFIzyaj6p8LI9w==", + "license": "MIT", + "dependencies": { + "@rc-component/mini-decimal": "^1.0.1", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mentions": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@rc-component/mentions/-/mentions-1.6.0.tgz", + "integrity": "sha512-KIkQNP6habNuTsLhUv0UGEOwG67tlmE7KNIJoQZZNggEZl5lQJTytFDb69sl5CK3TDdISCTjKP3nGEBKgT61CQ==", + "license": "MIT", + "dependencies": { + "@rc-component/input": "~1.1.0", + "@rc-component/menu": "~1.2.0", + "@rc-component/textarea": "~1.1.0", + "@rc-component/trigger": "^3.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/menu": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@rc-component/menu/-/menu-1.2.0.tgz", + "integrity": "sha512-VWwDuhvYHSnTGj4n6bV3ISrLACcPAzdPOq3d0BzkeiM5cve8BEYfvkEhNoM0PLzv51jpcejeyrLXeMVIJ+QJlg==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.1.4", + "@rc-component/overflow": "^1.0.0", + "@rc-component/trigger": "^3.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mini-decimal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/mini-decimal/-/mini-decimal-1.1.0.tgz", + "integrity": "sha512-jS4E7T9Li2GuYwI6PyiVXmxTiM6b07rlD9Ge8uGZSCz3WlzcG5ZK7g5bbuKNeZ9pgUuPK/5guV781ujdVpm4HQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@rc-component/motion": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@rc-component/motion/-/motion-1.1.6.tgz", + "integrity": "sha512-aEQobs/YA0kqRvHIPjQvOytdtdRVyhf/uXAal4chBjxDu6odHckExJzjn2D+Ju1aKK6hx3pAs6BXdV9+86xkgQ==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mutate-observer": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/mutate-observer/-/mutate-observer-2.0.1.tgz", + "integrity": "sha512-AyarjoLU5YlxuValRi+w8JRH2Z84TBbFO2RoGWz9d8bSu0FqT8DtugH3xC3BV7mUwlmROFauyWuXFuq4IFbH+w==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/notification": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@rc-component/notification/-/notification-1.2.0.tgz", + "integrity": "sha512-OX3J+zVU7rvoJCikjrfW7qOUp7zlDeFBK2eA3SFbGSkDqo63Sl4Ss8A04kFP+fxHSxMDIS9jYVEZtU1FNCFuBA==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.1.4", + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/overflow": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@rc-component/overflow/-/overflow-1.0.0.tgz", + "integrity": "sha512-GSlBeoE0XTBi5cf3zl8Qh7Uqhn7v8RrlJ8ajeVpEkNe94HWy5l5BQ0Mwn2TVUq9gdgbfEMUmTX7tJFAg7mz0Rw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@rc-component/resize-observer": "^1.0.1", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/pagination": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@rc-component/pagination/-/pagination-1.2.0.tgz", + "integrity": "sha512-YcpUFE8dMLfSo6OARJlK6DbHHvrxz7pMGPGmC/caZSJJz6HRKHC1RPP001PRHCvG9Z/veD039uOQmazVuLJzlw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/picker": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@rc-component/picker/-/picker-1.9.0.tgz", + "integrity": "sha512-OLisdk8AWVCG9goBU1dWzuH5QlBQk8jktmQ6p0/IyBFwdKGwyIZOSjnBYo8hooHiTdl0lU+wGf/OfMtVBw02KQ==", + "license": "MIT", + "dependencies": { + "@rc-component/overflow": "^1.0.0", + "@rc-component/resize-observer": "^1.0.0", + "@rc-component/trigger": "^3.6.15", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=12.x" + }, + "peerDependencies": { + "date-fns": ">= 2.x", + "dayjs": ">= 1.x", + "luxon": ">= 3.x", + "moment": ">= 2.x", + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + }, + "peerDependenciesMeta": { + "date-fns": { + "optional": true + }, + "dayjs": { + "optional": true + }, + "luxon": { + "optional": true + }, + "moment": { + "optional": true + } + } + }, + "node_modules/@rc-component/portal": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@rc-component/portal/-/portal-2.2.0.tgz", + "integrity": "sha512-oc6FlA+uXCMiwArHsJyHcIkX4q6uKyndrPol2eWX8YPkAnztHOPsFIRtmWG4BMlGE5h7YIRE3NiaJ5VS8Lb1QQ==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=12.x" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/progress": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@rc-component/progress/-/progress-1.0.2.tgz", + "integrity": "sha512-WZUnH9eGxH1+xodZKqdrHke59uyGZSWgj5HBM5Kwk5BrTMuAORO7VJ2IP5Qbm9aH3n9x3IcesqHHR0NWPBC7fQ==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/qrcode": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@rc-component/qrcode/-/qrcode-1.1.1.tgz", + "integrity": "sha512-LfLGNymzKdUPjXUbRP+xOhIWY4jQ+YMj5MmWAcgcAq1Ij8XP7tRmAXqyuv96XvLUBE/5cA8hLFl9eO1JQMujrA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/rate": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/rate/-/rate-1.0.1.tgz", + "integrity": "sha512-bkXxeBqDpl5IOC7yL7GcSYjQx9G8H+6kLYQnNZWeBYq2OYIv1MONd6mqKTjnnJYpV0cQIU2z3atdW0j1kttpTw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/resize-observer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/resize-observer/-/resize-observer-1.0.1.tgz", + "integrity": "sha512-r+w+Mz1EiueGk1IgjB3ptNXLYSLZ5vnEfKHH+gfgj7JMupftyzvUUl3fRcMZe5uMM04x0n8+G2o/c6nlO2+Wag==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/segmented": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@rc-component/segmented/-/segmented-1.3.0.tgz", + "integrity": "sha512-5J/bJ01mbDnoA6P/FW8SxUvKn+OgUSTZJPzCNnTBntG50tzoP7DydGhqxp7ggZXZls7me3mc2EQDXakU3iTVFg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@rc-component/motion": "^1.1.4", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@rc-component/select": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@rc-component/select/-/select-1.4.0.tgz", + "integrity": "sha512-DDCsUkx3lHAO42fyPiBADzZgbqOp3gepjBCusuy6DDN51Vx73cwX0aqsid1asxpIwHPMYGgYg+wXbLi4YctzLQ==", + "license": "MIT", + "dependencies": { + "@rc-component/overflow": "^1.0.0", + "@rc-component/trigger": "^3.0.0", + "@rc-component/util": "^1.3.0", + "@rc-component/virtual-list": "^1.0.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@rc-component/slider": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@rc-component/slider/-/slider-1.0.1.tgz", + "integrity": "sha512-uDhEPU1z3WDfCJhaL9jfd2ha/Eqpdfxsn0Zb0Xcq1NGQAman0TWaR37OWp2vVXEOdV2y0njSILTMpTfPV1454g==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/steps": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@rc-component/steps/-/steps-1.2.2.tgz", + "integrity": "sha512-/yVIZ00gDYYPHSY0JP+M+s3ZvuXLu2f9rEjQqiUDs7EcYsUYrpJ/1bLj9aI9R7MBR3fu/NGh6RM9u2qGfqp+Nw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/switch": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@rc-component/switch/-/switch-1.0.3.tgz", + "integrity": "sha512-Jgi+EbOBquje/XNdofr7xbJQZPYJP+BlPfR0h+WN4zFkdtB2EWqEfvkXJWeipflwjWip0/17rNbxEAqs8hVHfw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/table": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@rc-component/table/-/table-1.9.0.tgz", + "integrity": "sha512-cq3P9FkD+F3eglkFYhBuNlHclg+r4jY8+ZIgK7zbEFo6IwpnA77YL/Gq4ensLw9oua3zFCTA6JDu6YgBei0TxA==", + "license": "MIT", + "dependencies": { + "@rc-component/context": "^2.0.1", + "@rc-component/resize-observer": "^1.0.0", + "@rc-component/util": "^1.1.0", + "@rc-component/virtual-list": "^1.0.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/tabs": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@rc-component/tabs/-/tabs-1.7.0.tgz", + "integrity": "sha512-J48cs2iBi7Ho3nptBxxIqizEliUC+ExE23faspUQKGQ550vaBlv3aGF8Epv/UB1vFWeoJDTW/dNzgIU0Qj5i/w==", + "license": "MIT", + "dependencies": { + "@rc-component/dropdown": "~1.0.0", + "@rc-component/menu": "~1.2.0", + "@rc-component/motion": "^1.1.3", + "@rc-component/resize-observer": "^1.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/textarea": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/textarea/-/textarea-1.1.2.tgz", + "integrity": "sha512-9rMUEODWZDMovfScIEHXWlVZuPljZ2pd1LKNjslJVitn4SldEzq5vO1CL3yy3Dnib6zZal2r2DPtjy84VVpF6A==", + "license": "MIT", + "dependencies": { + "@rc-component/input": "~1.1.0", + "@rc-component/resize-observer": "^1.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/tooltip": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@rc-component/tooltip/-/tooltip-1.4.0.tgz", + "integrity": "sha512-8Rx5DCctIlLI4raR0I0xHjVTf1aF48+gKCNeAAo5bmF5VoR5YED+A/XEqzXv9KKqrJDRcd3Wndpxh2hyzrTtSg==", + "license": "MIT", + "dependencies": { + "@rc-component/trigger": "^3.7.1", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/tour": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@rc-component/tour/-/tour-2.2.1.tgz", + "integrity": "sha512-BUCrVikGJsXli38qlJ+h2WyDD6dYxzDA9dV3o0ij6gYhAq6ooT08SUMWOikva9v4KZ2BEuluGl5bPcsjrSoBgQ==", + "license": "MIT", + "dependencies": { + "@rc-component/portal": "^2.0.0", + "@rc-component/trigger": "^3.0.0", + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/tree": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/tree/-/tree-1.1.0.tgz", + "integrity": "sha512-HZs3aOlvFgQdgrmURRc/f4IujiNBf4DdEeXUlkS0lPoLlx9RoqsZcF0caXIAMVb+NaWqKtGQDnrH8hqLCN5zlA==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.0.0", + "@rc-component/util": "^1.2.1", + "@rc-component/virtual-list": "^1.0.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=10.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@rc-component/tree-select": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@rc-component/tree-select/-/tree-select-1.5.0.tgz", + "integrity": "sha512-1nBAMreFJXkCIeZlWG0l+6i0jLWzlmmRv/TrtZjLkoq8WmpzSuDhP32YroC7rAhGFR34thpHkvCedPzBXIL/XQ==", + "license": "MIT", + "dependencies": { + "@rc-component/select": "~1.4.0", + "@rc-component/tree": "~1.1.0", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@rc-component/trigger": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/@rc-component/trigger/-/trigger-3.7.2.tgz", + "integrity": "sha512-25x+D2k9SAkaK/MNMNmv2Nlv8FH1D9RtmjoMoLEw1Cid+sMV4pAAT5k49ku59UeXaOA1qwLUVrBUMq4A6gUSsQ==", + "license": "MIT", + "dependencies": { + "@rc-component/motion": "^1.1.4", + "@rc-component/portal": "^2.0.0", + "@rc-component/resize-observer": "^1.0.0", + "@rc-component/util": "^1.2.1", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/upload": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rc-component/upload/-/upload-1.1.0.tgz", + "integrity": "sha512-LIBV90mAnUE6VK5N4QvForoxZc4XqEYZimcp7fk+lkE4XwHHyJWxpIXQQwMU8hJM+YwBbsoZkGksL1sISWHQxw==", + "license": "MIT", + "dependencies": { + "@rc-component/util": "^1.3.0", + "clsx": "^2.1.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/util": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@rc-component/util/-/util-1.7.0.tgz", + "integrity": "sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==", + "license": "MIT", + "dependencies": { + "is-mobile": "^5.0.0", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rc-component/virtual-list": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@rc-component/virtual-list/-/virtual-list-1.0.2.tgz", + "integrity": "sha512-uvTol/mH74FYsn5loDGJxo+7kjkO4i+y4j87Re1pxJBs0FaeuMuLRzQRGaXwnMcV1CxpZLi2Z56Rerj2M00fjQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.0", + "@rc-component/resize-observer": "^1.0.1", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.53.5", "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.5.tgz", @@ -932,13 +3151,378 @@ "win32" ] }, + "node_modules/@shikijs/core": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.20.0.tgz", + "integrity": "sha512-f2ED7HYV4JEk827mtMDwe/yQ25pRiXZmtHjWF8uzZKuKiEsJR7Ce1nuQ+HhV9FzDcbIo4ObBCD9GPTzNuy9S1g==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.20.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5" + } + }, + "node_modules/@shikijs/engine-javascript": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.20.0.tgz", + "integrity": "sha512-OFx8fHAZuk7I42Z9YAdZ95To6jDePQ9Rnfbw9uSRTSbBhYBp1kEOKv/3jOimcj3VRUKusDYM6DswLauwfhboLg==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.20.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.4" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.20.0.tgz", + "integrity": "sha512-Yx3gy7xLzM0ZOjqoxciHjA7dAt5tyzJE3L4uQoM83agahy+PlW244XJSrmJRSBvGYELDhYXPacD4R/cauV5bzQ==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.20.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.20.0.tgz", + "integrity": "sha512-le+bssCxcSHrygCWuOrYJHvjus6zhQ2K7q/0mgjiffRbkhM4o1EWu2m+29l0yEsHDbWaWPNnDUTRVVBvBBeKaA==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.20.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.20.0.tgz", + "integrity": "sha512-U1NSU7Sl26Q7ErRvJUouArxfM2euWqq1xaSrbqMu2iqa+tSp0D1Yah8216sDYbdDHw4C8b75UpE65eWorm2erQ==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.20.0" + } + }, + "node_modules/@shikijs/transformers": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/transformers/-/transformers-3.20.0.tgz", + "integrity": "sha512-PrHHMRr3Q5W1qB/42kJW6laqFyWdhrPF2hNR9qjOm1xcSiAO3hAHo7HaVyHE6pMyevmy3i51O8kuGGXC78uK3g==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "3.20.0", + "@shikijs/types": "3.20.0" + } + }, + "node_modules/@shikijs/types": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.20.0.tgz", + "integrity": "sha512-lhYAATn10nkZcBQ0BlzSbJA3wcmL5MXUUF8d2Zzon6saZDlToKaiRX60n2+ZaHJCmXEcZRWNzn+k9vplr8Jhsw==", + "license": "MIT", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + "license": "MIT" + }, + "node_modules/@splinetool/runtime": { + "version": "0.9.526", + "resolved": "https://registry.npmjs.org/@splinetool/runtime/-/runtime-0.9.526.tgz", + "integrity": "sha512-qznHbXA5aKwDbCgESAothCNm1IeEZcmNWG145p5aXj4w5uoqR1TZ9qkTHTKLTsUbHeitCwdhzmRqan1kxboLgQ==", + "dependencies": { + "on-change": "^4.0.0", + "semver-compare": "^1.0.0" + } + }, + "node_modules/@stitches/react": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@stitches/react/-/react-1.2.8.tgz", + "integrity": "sha512-9g9dWI4gsSVe8bNLlb+lMkBYsnIKCZTmvqvDG+Avnn69XfmHZKiaMrx7cgTaddq7aTPPmXiTsbFcUy0xgI4+wA==", + "license": "MIT", + "peerDependencies": { + "react": ">= 16.3.0" + } + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", + "license": "MIT" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz", + "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "license": "MIT", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "license": "MIT" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", + "license": "MIT" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", + "license": "MIT" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", + "license": "MIT" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz", + "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.8", "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-1.0.8.tgz", "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, "license": "MIT" }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, "node_modules/@types/file-saver": { "version": "2.0.7", "resolved": "https://registry.npmmirror.com/@types/file-saver/-/file-saver-2.0.7.tgz", @@ -946,22 +3530,123 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/geojson": { + "version": "7946.0.16", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", + "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/js-cookie": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/js-cookie/-/js-cookie-3.0.6.tgz", + "integrity": "sha512-wkw9yd1kEXOPnvEeEV1Go1MmxtBJL0RR79aOTAApecWFVu7w0NNXNqhcWgvw2YgZDYadliXkl14pa3WXw5jlCQ==", + "license": "MIT" + }, + "node_modules/@types/katex": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.16.7.tgz", + "integrity": "sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==", + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, "node_modules/@types/node": { "version": "20.19.27", "resolved": "https://registry.npmmirror.com/@types/node/-/node-20.19.27.tgz", "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~6.21.0" } }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.7", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz", + "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT" + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, "node_modules/@types/web-bluetooth": { "version": "0.0.20", "resolved": "https://registry.npmmirror.com/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==", "license": "MIT" }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@use-gesture/core": { + "version": "10.3.1", + "resolved": "https://registry.npmjs.org/@use-gesture/core/-/core-10.3.1.tgz", + "integrity": "sha512-WcINiDt8WjqBdUXye25anHiNxPc0VOrlT8F6LLkU6cycrOGUDyY/yyFmsg3k8i5OLvv25llc0QC45GhR/C8llw==", + "license": "MIT" + }, + "node_modules/@use-gesture/react": { + "version": "10.3.1", + "resolved": "https://registry.npmjs.org/@use-gesture/react/-/react-10.3.1.tgz", + "integrity": "sha512-Yy19y6O2GJq8f7CHf7L0nxL8bf4PZCPaVOCgJrusOeFHY1LvHgYXnmnXg6N5iwAnbgbZCDjo60SiM6IPJi9C5g==", + "license": "MIT", + "dependencies": { + "@use-gesture/core": "10.3.1" + }, + "peerDependencies": { + "react": ">= 16.8.0" + } + }, "node_modules/@vitejs/plugin-vue": { "version": "5.2.4", "resolved": "https://registry.npmmirror.com/@vitejs/plugin-vue/-/plugin-vue-5.2.4.tgz", @@ -1183,6 +3868,28 @@ "url": "https://github.com/sponsors/antfu" } }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, "node_modules/adler-32": { "version": "1.3.1", "resolved": "https://registry.npmmirror.com/adler-32/-/adler-32-1.3.1.tgz", @@ -1192,6 +3899,28 @@ "node": ">=0.8" } }, + "node_modules/ahooks": { + "version": "3.9.6", + "resolved": "https://registry.npmjs.org/ahooks/-/ahooks-3.9.6.tgz", + "integrity": "sha512-Mr7f05swd5SmKlR9SZo5U6M0LsL4ErweLzpdgXjA1JPmnZ78Vr6wzx0jUtvoxrcqGKYnX0Yjc02iEASVxHFPjQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.21.0", + "@types/js-cookie": "^3.0.6", + "dayjs": "^1.9.1", + "intersection-observer": "^0.12.0", + "js-cookie": "^3.0.5", + "lodash": "^4.17.21", + "react-fast-compare": "^3.2.2", + "resize-observer-polyfill": "^1.5.1", + "screenfull": "^5.0.0", + "tslib": "^2.4.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, "node_modules/alien-signals": { "version": "1.0.13", "resolved": "https://registry.npmmirror.com/alien-signals/-/alien-signals-1.0.13.tgz", @@ -1212,6 +3941,91 @@ "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, + "node_modules/antd": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/antd/-/antd-6.1.3.tgz", + "integrity": "sha512-kvaLtOm0UwCIdtR424/Mo6pyJxN34/6003e1io3GIKWQOdlddplFylv767iGxXLMrxfNoQmxuNJcF1miFbxCZQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@ant-design/colors": "^8.0.0", + "@ant-design/cssinjs": "^2.0.1", + "@ant-design/cssinjs-utils": "^2.0.2", + "@ant-design/fast-color": "^3.0.0", + "@ant-design/icons": "^6.1.0", + "@ant-design/react-slick": "~2.0.0", + "@babel/runtime": "^7.28.4", + "@rc-component/cascader": "~1.10.0", + "@rc-component/checkbox": "~1.0.1", + "@rc-component/collapse": "~1.1.2", + "@rc-component/color-picker": "~3.0.3", + "@rc-component/dialog": "~1.5.1", + "@rc-component/drawer": "~1.3.0", + "@rc-component/dropdown": "~1.0.2", + "@rc-component/form": "~1.6.0", + "@rc-component/image": "~1.5.3", + "@rc-component/input": "~1.1.2", + "@rc-component/input-number": "~1.6.2", + "@rc-component/mentions": "~1.6.0", + "@rc-component/menu": "~1.2.0", + "@rc-component/motion": "~1.1.6", + "@rc-component/mutate-observer": "^2.0.1", + "@rc-component/notification": "~1.2.0", + "@rc-component/pagination": "~1.2.0", + "@rc-component/picker": "~1.9.0", + "@rc-component/progress": "~1.0.2", + "@rc-component/qrcode": "~1.1.1", + "@rc-component/rate": "~1.0.1", + "@rc-component/resize-observer": "^1.0.1", + "@rc-component/segmented": "~1.3.0", + "@rc-component/select": "~1.4.0", + "@rc-component/slider": "~1.0.1", + "@rc-component/steps": "~1.2.2", + "@rc-component/switch": "~1.0.3", + "@rc-component/table": "~1.9.0", + "@rc-component/tabs": "~1.7.0", + "@rc-component/textarea": "~1.1.2", + "@rc-component/tooltip": "~1.4.0", + "@rc-component/tour": "~2.2.1", + "@rc-component/tree": "~1.1.0", + "@rc-component/tree-select": "~1.5.0", + "@rc-component/trigger": "^3.7.2", + "@rc-component/upload": "~1.1.0", + "@rc-component/util": "^1.6.2", + "clsx": "^2.1.1", + "dayjs": "^1.11.11", + "scroll-into-view-if-needed": "^3.1.0", + "throttle-debounce": "^5.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ant-design" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/antd-style": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/antd-style/-/antd-style-4.1.0.tgz", + "integrity": "sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^2.0.0", + "@babel/runtime": "^7.24.1", + "@emotion/cache": "^11.11.0", + "@emotion/css": "^11.11.2", + "@emotion/react": "^11.11.4", + "@emotion/serialize": "^1.1.3", + "@emotion/utils": "^1.2.1", + "use-merge-value": "^1.2.0" + }, + "peerDependencies": { + "antd": ">=6.0.0", + "react": ">=18" + } + }, "node_modules/any-promise": { "version": "1.3.0", "resolved": "https://registry.npmmirror.com/any-promise/-/any-promise-1.3.0.tgz", @@ -1240,12 +4054,39 @@ "dev": true, "license": "MIT" }, + "node_modules/assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "license": "MIT", + "bin": { + "astring": "bin/astring" + } + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", "license": "MIT" }, + "node_modules/attr-accept": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz", + "integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/autoprefixer": { "version": "10.4.23", "resolved": "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.23.tgz", @@ -1294,6 +4135,31 @@ "proxy-from-env": "^1.1.0" } }, + "node_modules/babel-plugin-macros": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", + "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5", + "cosmiconfig": "^7.0.0", + "resolve": "^1.19.0" + }, + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz", @@ -1367,6 +4233,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1394,6 +4261,15 @@ "node": ">= 0.4" } }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/camelcase-css": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/camelcase-css/-/camelcase-css-2.0.1.tgz", @@ -1425,6 +4301,16 @@ ], "license": "CC-BY-4.0" }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/cfb": { "version": "1.2.2", "resolved": "https://registry.npmmirror.com/cfb/-/cfb-1.2.2.tgz", @@ -1438,11 +4324,52 @@ "node": ">=0.8" } }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/chart.js": { "version": "4.5.1", "resolved": "https://registry.npmmirror.com/chart.js/-/chart.js-4.5.1.tgz", "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", "license": "MIT", + "peer": true, "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -1450,6 +4377,38 @@ "pnpm": ">=8" } }, + "node_modules/chevrotain": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.0.3.tgz", + "integrity": "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/cst-dts-gen": "11.0.3", + "@chevrotain/gast": "11.0.3", + "@chevrotain/regexp-to-ast": "11.0.3", + "@chevrotain/types": "11.0.3", + "@chevrotain/utils": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/chevrotain-allstar": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz", + "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==", + "license": "MIT", + "dependencies": { + "lodash-es": "^4.17.21" + }, + "peerDependencies": { + "chevrotain": "^11.0.0" + } + }, + "node_modules/chevrotain/node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, "node_modules/chokidar": { "version": "3.6.0", "resolved": "https://registry.npmmirror.com/chokidar/-/chokidar-3.6.0.tgz", @@ -1488,6 +4447,39 @@ "node": ">= 6" } }, + "node_modules/chroma-js": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/chroma-js/-/chroma-js-3.2.0.tgz", + "integrity": "sha512-os/OippSlX1RlWWr+QDPcGUZs0uoqr32urfxESG9U93lhUfbnlyckte84Q8P1UQY/qth983AS1JONKmLS4T0nw==", + "license": "(BSD-3-Clause AND Apache-2.0)" + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/classnames": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==", + "license": "MIT" + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/codepage": { "version": "1.15.0", "resolved": "https://registry.npmmirror.com/codepage/-/codepage-1.15.0.tgz", @@ -1497,6 +4489,22 @@ "node": ">=0.8" } }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "license": "MIT" + }, "node_modules/combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmmirror.com/combined-stream/-/combined-stream-1.0.8.tgz", @@ -1509,6 +4517,16 @@ "node": ">= 0.8" } }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/commander": { "version": "4.1.1", "resolved": "https://registry.npmmirror.com/commander/-/commander-4.1.1.tgz", @@ -1519,6 +4537,58 @@ "node": ">= 6" } }, + "node_modules/compute-scroll-into-view": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz", + "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==", + "license": "MIT" + }, + "node_modules/confbox": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", + "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "license": "MIT" + }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", + "dependencies": { + "layout-base": "^1.0.0" + } + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cosmiconfig/node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, "node_modules/crc-32": { "version": "1.2.2", "resolved": "https://registry.npmmirror.com/crc-32/-/crc-32-1.2.2.tgz", @@ -1550,6 +4620,522 @@ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", "license": "MIT" }, + "node_modules/cytoscape": { + "version": "3.33.1", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz", + "integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^1.0.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", + "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^2.2.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/cose-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", + "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "license": "MIT", + "dependencies": { + "layout-base": "^2.0.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/layout-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", + "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", + "license": "MIT" + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + "d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", + "dependencies": { + "d3-path": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", + "dependencies": { + "d3-array": "^3.2.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", + "dependencies": { + "delaunator": "5" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + "json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", + "dependencies": { + "d3-dsv": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", + "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-random": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-array": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", + "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", + "license": "BSD-3-Clause", + "dependencies": { + "internmap": "^1.0.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", + "license": "BSD-3-Clause" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-path": "1" + } + }, + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", + "license": "ISC" + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "peer": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre-d3-es": { + "version": "7.0.13", + "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.13.tgz", + "integrity": "sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==", + "license": "MIT", + "dependencies": { + "d3": "^7.9.0", + "lodash-es": "^4.17.21" + } + }, + "node_modules/dayjs": { + "version": "1.11.19", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz", + "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", + "license": "MIT" + }, "node_modules/de-indent": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/de-indent/-/de-indent-1.0.2.tgz", @@ -1557,6 +5143,54 @@ "dev": true, "license": "MIT" }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decode-uri-component": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.4.1.tgz", + "integrity": "sha512-+8VxcR21HhTy8nOt6jf20w0c9CADrw1O8d+VZ/YzzCt4bJ3uBjw+D1q2osAB8RnpwwaeYBxy0HyKQxD5JBMuuQ==", + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", + "dependencies": { + "robust-predicates": "^3.0.2" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmmirror.com/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -1566,6 +5200,28 @@ "node": ">=0.4.0" } }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/didyoumean": { "version": "1.2.2", "resolved": "https://registry.npmmirror.com/didyoumean/-/didyoumean-1.2.2.tgz", @@ -1580,6 +5236,15 @@ "dev": true, "license": "MIT" }, + "node_modules/dompurify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz", + "integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==", + "license": "(MPL-2.0 OR Apache-2.0)", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, "node_modules/driver.js": { "version": "1.4.0", "resolved": "https://registry.npmmirror.com/driver.js/-/driver.js-1.4.0.tgz", @@ -1607,6 +5272,19 @@ "dev": true, "license": "ISC" }, + "node_modules/emoji-mart": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/emoji-mart/-/emoji-mart-5.6.0.tgz", + "integrity": "sha512-eJp3QRe79pjwa+duv+n7+5YsNhRcMl812EcFVwrnRvYKoNPoQb5qxU8DG6Bgwji0akHdp6D4Ln6tYLG58MFSow==", + "license": "MIT", + "peer": true + }, + "node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "license": "MIT" + }, "node_modules/entities": { "version": "4.5.0", "resolved": "https://registry.npmmirror.com/entities/-/entities-4.5.0.tgz", @@ -1619,6 +5297,15 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, "node_modules/es-define-property": { "version": "1.0.1", "resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz", @@ -1664,6 +5351,48 @@ "node": ">= 0.4" } }, + "node_modules/es-toolkit": { + "version": "1.43.0", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.43.0.tgz", + "integrity": "sha512-SKCT8AsWvYzBBuUqMk4NPwFlSdqLpJwmy6AP322ERn8W2YLIB6JBXnwMI2Qsh2gfphT3q7EKAxKb23cvFHFwKA==", + "license": "MIT", + "workspaces": [ + "docs", + "benchmarks" + ] + }, + "node_modules/esast-util-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", + "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esast-util-from-js": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", + "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/esbuild": { "version": "0.21.5", "resolved": "https://registry.npmmirror.com/esbuild/-/esbuild-0.21.5.tgz", @@ -1713,12 +5442,148 @@ "node": ">=6" } }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-scope": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", + "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", "license": "MIT" }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "license": "MIT", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extend-shallow/node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, "node_modules/fast-glob": { "version": "3.3.3", "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.3.tgz", @@ -1765,6 +5630,18 @@ "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==", "license": "MIT" }, + "node_modules/file-selector": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/file-selector/-/file-selector-0.5.0.tgz", + "integrity": "sha512-s8KNnmIDTBoD0p9uJ9uD0XY38SCeBOtj0UMXyQSLg1Ypfrfj8+dAvwsLjYQkQ2GjhVtp2HrnF5cJzMhBjfD8HA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.3" + }, + "engines": { + "node": ">= 10" + } + }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.1.1.tgz", @@ -1778,6 +5655,24 @@ "node": ">=8" } }, + "node_modules/filter-obj": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-5.1.0.tgz", + "integrity": "sha512-qWeTREPoT7I0bifpPUXtxkZJ1XJzxWtfoWWkdVGqa+eCr3SHW/Ocp89o8vLvbUuQnadybJpjOKu4V+RwO6sGng==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-root": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", + "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", + "license": "MIT" + }, "node_modules/follow-redirects": { "version": "1.15.11", "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.11.tgz", @@ -1798,6 +5693,15 @@ } } }, + "node_modules/for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/form-data": { "version": "4.0.5", "resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.5.tgz", @@ -1837,6 +5741,33 @@ "url": "https://github.com/sponsors/rawify" } }, + "node_modules/framer-motion": { + "version": "12.23.26", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.23.26.tgz", + "integrity": "sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA==", + "license": "MIT", + "dependencies": { + "motion-dom": "^12.23.23", + "motion-utils": "^12.23.6", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz", @@ -1861,6 +5792,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/get-east-asian-width": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", + "integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/get-intrinsic": { "version": "1.3.0", "resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz", @@ -1898,6 +5841,24 @@ "node": ">= 0.4" } }, + "node_modules/get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/giscus": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/giscus/-/giscus-1.6.0.tgz", + "integrity": "sha512-Zrsi8r4t1LVW950keaWcsURuZUQwUaMKjvJgTCY125vkW6OiEBkatE7ScJDbpqKHdZwb///7FVC21SE3iFK3PQ==", + "license": "MIT", + "dependencies": { + "lit": "^3.2.1" + } + }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz", @@ -1923,6 +5884,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hachure-fill": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz", + "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==", + "license": "MIT" + }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz", @@ -1962,6 +5929,269 @@ "node": ">= 0.4" } }, + "node_modules/hast-util-from-dom": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-dom/-/hast-util-from-dom-5.0.1.tgz", + "integrity": "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==", + "license": "ISC", + "dependencies": { + "@types/hast": "^3.0.0", + "hastscript": "^9.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz", + "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.1.0", + "hast-util-from-parse5": "^8.0.0", + "parse5": "^7.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html-isomorphic": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hast-util-from-html-isomorphic/-/hast-util-from-html-isomorphic-2.0.0.tgz", + "integrity": "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-from-dom": "^5.0.0", + "hast-util-from-html": "^2.0.0", + "unist-util-remove-position": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-element": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", + "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz", + "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz", + "integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", + "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unist-util-find-after": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/he": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/he/-/he-1.2.0.tgz", @@ -1972,6 +6202,131 @@ "he": "bin/he" } }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hoist-non-react-statics/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/immer": { + "version": "11.1.3", + "resolved": "https://registry.npmjs.org/immer/-/immer-11.1.3.tgz", + "integrity": "sha512-6jQTc5z0KJFtr1UgFpIL3N9XSC3saRaI9PwWtzM2pSqkNGtiNkYY2OSwkOGDK2XcTRcLb1pi/aNkKZz0nxVH4Q==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/intersection-observer": { + "version": "0.12.2", + "resolved": "https://registry.npmjs.org/intersection-observer/-/intersection-observer-0.12.2.tgz", + "integrity": "sha512-7m1vEcPCxXYI8HqnL8CKI6siDyD+eIWSwgB3DZA+ZTogxk9I4CDnj4wilt9x/+/QbHI4YG5YZNmC6458/e9Ktg==", + "deprecated": "The Intersection Observer polyfill is no longer needed and can safely be removed. Intersection Observer has been Baseline since 2019.", + "license": "Apache-2.0" + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, "node_modules/is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-2.1.0.tgz", @@ -1989,7 +6344,6 @@ "version": "2.16.1", "resolved": "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.16.1.tgz", "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", - "dev": true, "license": "MIT", "dependencies": { "hasown": "^2.0.2" @@ -2001,6 +6355,28 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "license": "MIT", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz", @@ -2024,6 +6400,22 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-mobile": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-5.0.0.tgz", + "integrity": "sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==", + "license": "MIT" + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz", @@ -2034,23 +6426,173 @@ "node": ">=0.12.0" } }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "license": "MIT", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/jiti": { "version": "1.21.7", "resolved": "https://registry.npmmirror.com/jiti/-/jiti-1.21.7.tgz", "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "dev": true, "license": "MIT", + "peer": true, "bin": { "jiti": "bin/jiti.js" } }, + "node_modules/js-cookie": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", + "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", + "license": "MIT", + "engines": { + "node": ">=14" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true, "license": "MIT" }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json2mq": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/json2mq/-/json2mq-0.2.0.tgz", + "integrity": "sha512-SzoRg7ux5DWTII9J2qkrZrqV1gt+rTaoufMxEzXbS26Uid0NwaJd123HcoB80TgubEppxxIGdNxCx50fEoEWQA==", + "license": "MIT", + "dependencies": { + "string-convert": "^0.2.0" + } + }, + "node_modules/katex": { + "version": "0.16.27", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.27.tgz", + "integrity": "sha512-aeQoDkuRWSqQN6nSvVCEFvfXdqo1OQiCmmW1kc9xSdjutPv7BGO7pqY9sQRJpMOGrEdfDgF2TfRXe5eUAD2Waw==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, + "node_modules/langium": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/langium/-/langium-3.3.1.tgz", + "integrity": "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==", + "license": "MIT", + "dependencies": { + "chevrotain": "~11.0.3", + "chevrotain-allstar": "~0.3.0", + "vscode-languageserver": "~9.0.1", + "vscode-languageserver-textdocument": "~1.0.11", + "vscode-uri": "~3.0.8" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/langium/node_modules/vscode-uri": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz", + "integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==", + "license": "MIT" + }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", + "license": "MIT" + }, + "node_modules/leva": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/leva/-/leva-0.10.1.tgz", + "integrity": "sha512-BcjnfUX8jpmwZUz2L7AfBtF9vn4ggTH33hmeufDULbP3YgNZ/C+ss/oO3stbrqRQyaOmRwy70y7BGTGO81S3rA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-portal": "^1.1.4", + "@radix-ui/react-tooltip": "^1.1.8", + "@stitches/react": "^1.2.8", + "@use-gesture/react": "^10.2.5", + "colord": "^2.9.2", + "dequal": "^2.0.2", + "merge-value": "^1.0.0", + "react-colorful": "^5.5.1", + "react-dropzone": "^12.0.0", + "v8n": "^1.3.3", + "zustand": "^3.6.9" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, "node_modules/lilconfig": { "version": "3.1.3", "resolved": "https://registry.npmmirror.com/lilconfig/-/lilconfig-3.1.3.tgz", @@ -2068,9 +6610,82 @@ "version": "1.2.4", "resolved": "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true, "license": "MIT" }, + "node_modules/lit": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/lit/-/lit-3.3.2.tgz", + "integrity": "sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit/reactive-element": "^2.1.0", + "lit-element": "^4.2.0", + "lit-html": "^3.3.0" + } + }, + "node_modules/lit-element": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/lit-element/-/lit-element-4.2.2.tgz", + "integrity": "sha512-aFKhNToWxoyhkNDmWZwEva2SlQia+jfG0fjIWV//YeTaWrVnOxD89dPKfigCUspXFmjzOEUQpOkejH5Ly6sG0w==", + "license": "BSD-3-Clause", + "dependencies": { + "@lit-labs/ssr-dom-shim": "^1.5.0", + "@lit/reactive-element": "^2.1.0", + "lit-html": "^3.3.0" + } + }, + "node_modules/lit-html": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-3.3.2.tgz", + "integrity": "sha512-Qy9hU88zcmaxBXcc10ZpdK7cOLXvXpRoBxERdtqV9QOrfpMZZ6pSYP91LhpPtap3sFMUiL7Tw2RImbe0Al2/kw==", + "license": "BSD-3-Clause", + "dependencies": { + "@types/trusted-types": "^2.0.2" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "license": "MIT" + }, + "node_modules/lodash-es": { + "version": "4.17.22", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.22.tgz", + "integrity": "sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==", + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lucide-react": { + "version": "0.562.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz", + "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, "node_modules/magic-string": { "version": "0.30.21", "resolved": "https://registry.npmmirror.com/magic-string/-/magic-string-0.30.21.tgz", @@ -2080,6 +6695,40 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/marked": { + "version": "17.0.1", + "resolved": "https://registry.npmjs.org/marked/-/marked-17.0.1.tgz", + "integrity": "sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -2089,6 +6738,353 @@ "node": ">= 0.4" } }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-math": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-math/-/mdast-util-math-3.0.0.tgz", + "integrity": "sha512-Tl9GBNeG/AhJnQM221bJR2HPvLOSnLE/T9cJI9tlc6zwQk2nPk/4f0cHkOdEixQPC/j8UtKDdITswvLAy1OZ1w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "longest-streak": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.1.0", + "unist-util-remove-position": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-newline-to-break": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-newline-to-break/-/mdast-util-newline-to-break-2.0.0.tgz", + "integrity": "sha512-MbgeFca0hLYIEx/2zGsszCSEJJ1JSCdiY5xQxRcLDDGa8EPvlLPupJ4DSajbMPAnC0je8jfb9TiUATnxxrHUog==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-find-and-replace": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/merge-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/merge-value/-/merge-value-1.0.0.tgz", + "integrity": "sha512-fJMmvat4NeKz63Uv9iHWcPDjCWcCkoiRoajRTEO8hlhUC6rwaHg0QCF9hBOTjZmm4JuglPckPSTtcuJL5kp0TQ==", + "license": "MIT", + "dependencies": { + "get-value": "^2.0.6", + "is-extendable": "^1.0.0", + "mixin-deep": "^1.2.0", + "set-value": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz", @@ -2099,6 +7095,840 @@ "node": ">= 8" } }, + "node_modules/mermaid": { + "version": "11.12.2", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.12.2.tgz", + "integrity": "sha512-n34QPDPEKmaeCG4WDMGy0OT6PSyxKCfy2pJgShP+Qow2KLrvWjclwbc3yXfSIf4BanqWEhQEpngWwNp/XhZt6w==", + "license": "MIT", + "dependencies": { + "@braintree/sanitize-url": "^7.1.1", + "@iconify/utils": "^3.0.1", + "@mermaid-js/parser": "^0.6.3", + "@types/d3": "^7.4.3", + "cytoscape": "^3.29.3", + "cytoscape-cose-bilkent": "^4.1.0", + "cytoscape-fcose": "^2.2.0", + "d3": "^7.9.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.13", + "dayjs": "^1.11.18", + "dompurify": "^3.2.5", + "katex": "^0.16.22", + "khroma": "^2.1.0", + "lodash-es": "^4.17.21", + "marked": "^16.2.1", + "roughjs": "^4.6.6", + "stylis": "^4.3.6", + "ts-dedent": "^2.2.0", + "uuid": "^11.1.0" + } + }, + "node_modules/mermaid/node_modules/marked": { + "version": "16.4.2", + "resolved": "https://registry.npmjs.org/marked/-/marked-16.4.2.tgz", + "integrity": "sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/mermaid/node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-cjk-friendly": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/micromark-extension-cjk-friendly/-/micromark-extension-cjk-friendly-1.2.3.tgz", + "integrity": "sha512-gRzVLUdjXBLX6zNPSnHGDoo+ZTp5zy+MZm0g3sv+3chPXY7l9gW+DnrcHcZh/jiPR6MjPKO4AEJNp4Aw6V9z5Q==", + "license": "MIT", + "dependencies": { + "devlop": "^1.1.0", + "micromark-extension-cjk-friendly-util": "2.1.1", + "micromark-util-chunked": "^2.0.1", + "micromark-util-resolve-all": "^2.0.1", + "micromark-util-symbol": "^2.0.1" + }, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "micromark": "^4.0.0", + "micromark-util-types": "^2.0.0" + }, + "peerDependenciesMeta": { + "micromark-util-types": { + "optional": true + } + } + }, + "node_modules/micromark-extension-cjk-friendly-util": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-cjk-friendly-util/-/micromark-extension-cjk-friendly-util-2.1.1.tgz", + "integrity": "sha512-egs6+12JU2yutskHY55FyR48ZiEcFOJFyk9rsiyIhcJ6IvWB6ABBqVrBw8IobqJTDZ/wdSr9eoXDPb5S2nW1bg==", + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.3.0", + "micromark-util-character": "^2.1.1", + "micromark-util-symbol": "^2.0.1" + }, + "engines": { + "node": ">=16" + }, + "peerDependenciesMeta": { + "micromark-util-types": { + "optional": true + } + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-math": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-math/-/micromark-extension-math-3.1.0.tgz", + "integrity": "sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==", + "license": "MIT", + "dependencies": { + "@types/katex": "^0.16.0", + "devlop": "^1.0.0", + "katex": "^0.16.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz", + "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz", + "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "license": "MIT", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz", + "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz", + "integrity": "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, "node_modules/micromatch": { "version": "4.0.8", "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz", @@ -2150,6 +7980,78 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "license": "MIT", + "dependencies": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/mlly": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz", + "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==", + "license": "MIT", + "dependencies": { + "acorn": "^8.15.0", + "pathe": "^2.0.3", + "pkg-types": "^1.3.1", + "ufo": "^1.6.1" + } + }, + "node_modules/motion": { + "version": "12.23.26", + "resolved": "https://registry.npmjs.org/motion/-/motion-12.23.26.tgz", + "integrity": "sha512-Ll8XhVxY8LXMVYTCfme27WH2GjBrCIzY4+ndr5QKxsK+YwCtOi2B/oBi5jcIbik5doXuWT/4KKDOVAZJkeY5VQ==", + "license": "MIT", + "dependencies": { + "framer-motion": "^12.23.26", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/motion-dom": { + "version": "12.23.23", + "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.23.23.tgz", + "integrity": "sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA==", + "license": "MIT", + "dependencies": { + "motion-utils": "^12.23.6" + } + }, + "node_modules/motion-utils": { + "version": "12.23.6", + "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.23.6.tgz", + "integrity": "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ==", + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, "node_modules/muggle-string": { "version": "0.4.1", "resolved": "https://registry.npmmirror.com/muggle-string/-/muggle-string-0.4.1.tgz", @@ -2221,11 +8123,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/numeral": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/numeral/-/numeral-2.0.6.tgz", + "integrity": "sha512-qaKRmtYPZ5qdw4jWJD6bxEf1FJEqllJrwxCLIm0sQU/A7v2/czigzOb+C2uSiFsa9lBUzeH7M1oK+Q+OLxL3kA==", + "license": "MIT", + "engines": { + "node": "*" + } + }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz", "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -2241,6 +8151,120 @@ "node": ">= 6" } }, + "node_modules/on-change": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/on-change/-/on-change-4.0.2.tgz", + "integrity": "sha512-cMtCyuJmTx/bg2HCpHo3ZLeF7FZnBOapLqZHr2AlLeJ5Ul0Zu2mUJJz051Fdwu/Et2YW04ZD+TtU+gVy0ACNCA==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/on-change?sponsor=1" + } + }, + "node_modules/oniguruma-parser": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz", + "integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==", + "license": "MIT" + }, + "node_modules/oniguruma-to-es": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.4.tgz", + "integrity": "sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==", + "license": "MIT", + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.0.1", + "regex-recursion": "^6.0.2" + } + }, + "node_modules/package-manager-detector": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.6.0.tgz", + "integrity": "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==", + "license": "MIT" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/path-browserify": { "version": "1.0.1", "resolved": "https://registry.npmmirror.com/path-browserify/-/path-browserify-1.0.1.tgz", @@ -2248,6 +8272,12 @@ "dev": true, "license": "MIT" }, + "node_modules/path-data-parser": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz", + "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==", + "license": "MIT" + }, "node_modules/path-key": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/path-key/-/path-key-4.0.0.tgz", @@ -2265,7 +8295,21 @@ "version": "1.0.7", "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true, + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", "license": "MIT" }, "node_modules/picocolors": { @@ -2329,6 +8373,45 @@ "node": ">= 6" } }, + "node_modules/pkg-types": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz", + "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==", + "license": "MIT", + "dependencies": { + "confbox": "^0.1.8", + "mlly": "^1.7.4", + "pathe": "^2.0.1" + } + }, + "node_modules/points-on-curve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz", + "integrity": "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==", + "license": "MIT" + }, + "node_modules/points-on-path": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz", + "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==", + "license": "MIT", + "dependencies": { + "path-data-parser": "0.1.0", + "points-on-curve": "0.2.0" + } + }, + "node_modules/polished": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", + "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.17.8" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/postcss": { "version": "8.5.6", "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.5.6.tgz", @@ -2348,6 +8431,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -2491,12 +8575,56 @@ "dev": true, "license": "MIT" }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", "license": "MIT" }, + "node_modules/query-string": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/query-string/-/query-string-9.3.1.tgz", + "integrity": "sha512-5fBfMOcDi5SA9qj5jZhWAcTtDfKF5WFdd2uD9nVNlbxVv1baq65aALy6qofpNEGELHvisjjasxQp7BlM9gvMzw==", + "license": "MIT", + "dependencies": { + "decode-uri-component": "^0.4.1", + "filter-obj": "^5.1.0", + "split-on-first": "^3.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -2518,6 +8646,465 @@ ], "license": "MIT" }, + "node_modules/rc-collapse": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/rc-collapse/-/rc-collapse-4.0.0.tgz", + "integrity": "sha512-SwoOByE39/3oIokDs/BnkqI+ltwirZbP8HZdq1/3SkPSBi7xDdvWHTp7cpNI9ullozkR6mwTWQi6/E/9huQVrA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.3.4", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dialog": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/rc-dialog/-/rc-dialog-9.6.0.tgz", + "integrity": "sha512-ApoVi9Z8PaCQg6FsUzS8yvBEQy0ZL2PkuvAgrmohPkN3okps5WZ5WQWPc1RNuiOKaAYv8B97ACdsFU5LizzCqg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/portal": "^1.0.0-8", + "classnames": "^2.2.6", + "rc-motion": "^2.3.0", + "rc-util": "^5.21.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dialog/node_modules/@rc-component/portal": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/portal/-/portal-1.1.2.tgz", + "integrity": "sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-footer": { + "version": "0.6.8", + "resolved": "https://registry.npmjs.org/rc-footer/-/rc-footer-0.6.8.tgz", + "integrity": "sha512-JBZ+xcb6kkex8XnBd4VHw1ZxjV6kmcwUumSHaIFdka2qzMCo7Klcy4sI6G0XtUpG/vtpislQCc+S9Bc+NLHYMg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-image": { + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/rc-image/-/rc-image-7.12.0.tgz", + "integrity": "sha512-cZ3HTyyckPnNnUb9/DRqduqzLfrQRyi+CdHjdqgsyDpI3Ln5UX1kXnAhPBSJj9pVRzwRFgqkN7p9b6HBDjmu/Q==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/portal": "^1.0.2", + "classnames": "^2.2.6", + "rc-dialog": "~9.6.0", + "rc-motion": "^2.6.2", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-image/node_modules/@rc-component/portal": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/portal/-/portal-1.1.2.tgz", + "integrity": "sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-input": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/rc-input/-/rc-input-1.8.0.tgz", + "integrity": "sha512-KXvaTbX+7ha8a/k+eg6SYRVERK0NddX8QX7a7AnRvUa/rEH0CNMlpcBzBkhI0wp2C8C4HlMoYl8TImSN+fuHKA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-input-number": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/rc-input-number/-/rc-input-number-9.5.0.tgz", + "integrity": "sha512-bKaEvB5tHebUURAEXw35LDcnRZLq3x1k7GxfAqBMzmpHkDGzjAtnUL8y4y5N15rIFIg5IJgwr211jInl3cipag==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/mini-decimal": "^1.0.1", + "classnames": "^2.2.5", + "rc-input": "~1.8.0", + "rc-util": "^5.40.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu": { + "version": "9.16.1", + "resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-9.16.1.tgz", + "integrity": "sha512-ghHx6/6Dvp+fw8CJhDUHFHDJ84hJE3BXNCzSgLdmNiFErWSOaZNsihDAsKq9ByTALo/xkNIwtDFGIl6r+RPXBg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu/node_modules/@rc-component/portal": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@rc-component/portal/-/portal-1.1.2.tgz", + "integrity": "sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu/node_modules/@rc-component/trigger": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@rc-component/trigger/-/trigger-2.3.0.tgz", + "integrity": "sha512-iwaxZyzOuK0D7lS+0AQEtW52zUWxoGqTGkke3dRyb8pYiShmRpCjB/8TzPI4R6YySCH7Vm9BZj/31VPiiQTLBg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.44.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-motion": { + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/rc-motion/-/rc-motion-2.9.5.tgz", + "integrity": "sha512-w+XTUrfh7ArbYEd2582uDrEhmBHwK1ZENJiSJVb7uRxdE7qJSYjbO2eksRXmndqyKqKoYPc9ClpPh5242mV1vA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.44.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-overflow": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/rc-overflow/-/rc-overflow-1.5.0.tgz", + "integrity": "sha512-Lm/v9h0LymeUYJf0x39OveU52InkdRXqnn2aYXfWmo8WdOonIKB2kfau+GF0fWq6jPgtdO9yMqveGcK6aIhJmg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.37.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-resize-observer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/rc-resize-observer/-/rc-resize-observer-1.4.3.tgz", + "integrity": "sha512-YZLjUbyIWox8E9i9C3Tm7ia+W7euPItNWSPX5sCcQTYbnwDb5uNpnLHQCG1f22oZWUhLw4Mv2tFmeWe68CDQRQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "classnames": "^2.2.1", + "rc-util": "^5.44.1", + "resize-observer-polyfill": "^1.5.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-util": { + "version": "5.44.4", + "resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.44.4.tgz", + "integrity": "sha512-resueRJzmHG9Q6rI/DfK6Kdv9/Lfls05vzMs1Sk3M2P+3cJa+MakaZyWY8IPfehVuhPJFKrIY1IK4GqbiaiY5w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/re-resizable": { + "version": "6.11.2", + "resolved": "https://registry.npmjs.org/re-resizable/-/re-resizable-6.11.2.tgz", + "integrity": "sha512-2xI2P3OHs5qw7K0Ud1aLILK6MQxW50TcO+DetD9eIV58j84TqYeHoZcL9H4GXFXXIh7afhH8mv5iUCXII7OW7A==", + "license": "MIT", + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.13.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", + "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-avatar-editor": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/react-avatar-editor/-/react-avatar-editor-14.0.0.tgz", + "integrity": "sha512-NaQM3oo4u0a1/Njjutc2FjwKX35vQV+t6S8hovsbAlMpBN1ntIwP/g+Yr9eDIIfaNtRXL0AqboTnPmRxhD/i8A==", + "license": "MIT", + "peerDependencies": { + "react": "^0.14.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^0.14.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-colorful": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/react-colorful/-/react-colorful-5.6.1.tgz", + "integrity": "sha512-1exovf0uGTGyq5mXQT0zgQ80uvj2PCwvF8zY1RN9/vbJVSjSo3fsB/4L3ObbF7u70NduSiK4xu4Y6q1MHoUGEw==", + "license": "MIT", + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.3" + } + }, + "node_modules/react-draggable": { + "version": "4.4.6", + "resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-4.4.6.tgz", + "integrity": "sha512-LtY5Xw1zTPqHkVmtM3X8MUOxNDOUhv/khTgBgrUvwaS064bwVvxT+q5El0uUFNx5IEPKXuRejr7UqLwBIg5pdw==", + "license": "MIT", + "dependencies": { + "clsx": "^1.1.1", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": ">= 16.3.0", + "react-dom": ">= 16.3.0" + } + }, + "node_modules/react-draggable/node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/react-dropzone": { + "version": "12.1.0", + "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-12.1.0.tgz", + "integrity": "sha512-iBYHA1rbopIvtzokEX4QubO6qk5IF/x3BtKGu74rF2JkQDXnwC4uO/lHKpaw4PJIV6iIAYOlwLv2FpiGyqHNog==", + "license": "MIT", + "dependencies": { + "attr-accept": "^2.2.2", + "file-selector": "^0.5.0", + "prop-types": "^15.8.1" + }, + "engines": { + "node": ">= 10.13" + }, + "peerDependencies": { + "react": ">= 16.8" + } + }, + "node_modules/react-error-boundary": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/react-error-boundary/-/react-error-boundary-6.0.1.tgz", + "integrity": "sha512-zArgQpjJUN1ZLMEKWtifxQweW3yfvwL5j2nh3Pesze1qG6r5oCDMy/TA97bUF01wy4xCeeL4/pd8GHmvEsP3Bg==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==", + "license": "MIT" + }, + "node_modules/react-hotkeys-hook": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/react-hotkeys-hook/-/react-hotkeys-hook-5.2.1.tgz", + "integrity": "sha512-xbKh6zJxd/vJHT4Bw4+0pBD662Fk20V+VFhLqciCg+manTVO4qlqRqiwFOYelfHN9dBvWj9vxaPkSS26ZSIJGg==", + "license": "MIT", + "workspaces": [ + "packages/*" + ], + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/react-markdown": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz", + "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-merge-refs": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/react-merge-refs/-/react-merge-refs-3.0.2.tgz", + "integrity": "sha512-MSZAfwFfdbEvwkKWP5EI5chuLYnNUxNS7vyS0i1Jp+wtd8J4Ga2ddzhaE68aMol2Z4vCnRM/oGOo1a3V75UPlw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "react": ">=16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + } + } + }, + "node_modules/react-rnd": { + "version": "10.5.2", + "resolved": "https://registry.npmjs.org/react-rnd/-/react-rnd-10.5.2.tgz", + "integrity": "sha512-0Tm4x7k7pfHf2snewJA8x7Nwgt3LV+58MVEWOVsFjk51eYruFEa6Wy7BNdxt4/lH0wIRsu7Gm3KjSXY2w7YaNw==", + "license": "MIT", + "dependencies": { + "re-resizable": "6.11.2", + "react-draggable": "4.4.6", + "tslib": "2.6.2" + }, + "peerDependencies": { + "react": ">=16.3.0", + "react-dom": ">=16.3.0" + } + }, + "node_modules/react-rnd/node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", + "license": "0BSD" + }, + "node_modules/react-zoom-pan-pinch": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.7.0.tgz", + "integrity": "sha512-UmReVZ0TxlKzxSbYiAj+LeGRW8s8LraAFTXRAxzMYnNRgGPsxCudwZKVkjvGmjtx7SW/hZamt69NUmGf4xrkXA==", + "license": "MIT", + "engines": { + "node": ">=8", + "npm": ">=5" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, "node_modules/read-cache": { "version": "1.0.0", "resolved": "https://registry.npmmirror.com/read-cache/-/read-cache-1.0.0.tgz", @@ -2541,11 +9128,327 @@ "node": ">=8.10.0" } }, + "node_modules/recma-build-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", + "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-jsx": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.1.tgz", + "integrity": "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==", + "license": "MIT", + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": "^1.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/recma-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", + "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-stringify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", + "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/regex/-/regex-6.1.0.tgz", + "integrity": "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-recursion": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz", + "integrity": "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-utilities": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + "license": "MIT" + }, + "node_modules/rehype-github-alerts": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/rehype-github-alerts/-/rehype-github-alerts-4.2.0.tgz", + "integrity": "sha512-6di6kEu9WUHKLKrkKG2xX6AOuaCMGghg0Wq7MEuM/jBYUPVIq6PJpMe00dxMfU+/YSBtDXhffpDimgDi+BObIQ==", + "license": "MIT", + "dependencies": { + "@primer/octicons": "^19.20.0", + "hast-util-from-html": "^2.0.3", + "hast-util-is-element": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/chrisweb" + } + }, + "node_modules/rehype-katex": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", + "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/katex": "^0.16.0", + "hast-util-from-html-isomorphic": "^2.0.0", + "hast-util-to-text": "^4.0.0", + "katex": "^0.16.0", + "unist-util-visit-parents": "^6.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-recma": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", + "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-breaks": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-breaks/-/remark-breaks-4.0.0.tgz", + "integrity": "sha512-IjEjJOkH4FuJvHZVIW0QCDWxcG96kCq7An/KVH2NfJe6rKZU2AsHeB3OEjPNRxi4QC34Xdx7I2KGYn6IpT7gxQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-newline-to-break": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-cjk-friendly": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/remark-cjk-friendly/-/remark-cjk-friendly-1.2.3.tgz", + "integrity": "sha512-UvAgxwlNk+l9Oqgl/9MWK2eWRS7zgBW/nXX9AthV7nd/3lNejF138E7Xbmk9Zs4WjTJGs721r7fAEc7tNFoH7g==", + "license": "MIT", + "dependencies": { + "micromark-extension-cjk-friendly": "1.2.3" + }, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "@types/mdast": "^4.0.0", + "unified": "^11.0.0" + }, + "peerDependenciesMeta": { + "@types/mdast": { + "optional": true + } + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-github": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/remark-github/-/remark-github-12.0.0.tgz", + "integrity": "sha512-ByefQKFN184LeiGRCabfl7zUJsdlMYWEhiLX1gpmQ11yFg6xSuOTW7LVCv0oc1x+YvUMJW23NU36sJX2RWGgvg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "to-vfile": "^8.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-math": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz", + "integrity": "sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-math": "^3.0.0", + "micromark-extension-math": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.1.tgz", + "integrity": "sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==", + "license": "MIT", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/reselect": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", + "integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==", + "license": "MIT" + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==", + "license": "MIT" + }, "node_modules/resolve": { "version": "1.22.11", "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.11.tgz", "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", - "dev": true, "license": "MIT", "dependencies": { "is-core-module": "^2.16.1", @@ -2562,6 +9465,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/reusify": { "version": "1.1.0", "resolved": "https://registry.npmmirror.com/reusify/-/reusify-1.1.0.tgz", @@ -2573,6 +9485,12 @@ "node": ">=0.10.0" } }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", + "license": "Unlicense" + }, "node_modules/rollup": { "version": "4.53.5", "resolved": "https://registry.npmmirror.com/rollup/-/rollup-4.53.5.tgz", @@ -2615,6 +9533,18 @@ "fsevents": "~2.3.2" } }, + "node_modules/roughjs": { + "version": "4.6.6", + "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz", + "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==", + "license": "MIT", + "dependencies": { + "hachure-fill": "^0.5.2", + "path-data-parser": "^0.1.0", + "points-on-curve": "^0.2.0", + "points-on-path": "^0.2.1" + } + }, "node_modules/run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz", @@ -2639,6 +9569,124 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", + "license": "BSD-3-Clause" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/screenfull": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/screenfull/-/screenfull-5.2.0.tgz", + "integrity": "sha512-9BakfsO2aUQN2K9Fdbj87RJIEZ82Q9IGim7FqM5OsebfoFC6ZHXgDq/KvniuLTPdeM8wY2o6Dj3WQ7KeQCj3cA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/scroll-into-view-if-needed": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz", + "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==", + "license": "MIT", + "dependencies": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "node_modules/semver-compare": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", + "integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==", + "license": "MIT" + }, + "node_modules/set-value": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/set-value/node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/shiki": { + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.20.0.tgz", + "integrity": "sha512-kgCOlsnyWb+p0WU+01RjkCH+eBVsjL1jOwUYWv0YDWkM2/A46+LDKVs5yZCUXjJG6bj4ndFoAg5iLIIue6dulg==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "3.20.0", + "@shikijs/engine-javascript": "3.20.0", + "@shikijs/engine-oniguruma": "3.20.0", + "@shikijs/langs": "3.20.0", + "@shikijs/themes": "3.20.0", + "@shikijs/types": "3.20.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/shiki-stream": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/shiki-stream/-/shiki-stream-0.1.3.tgz", + "integrity": "sha512-pDIqmaP/zJWHNV8bJKp0tD0CZ6OkF+lWTIvmNRLktlTjBjN3+durr19JarS657U1oSEf/WrSYmdzwr9CeD6m2Q==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "^3.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "react": "^19.0.0", + "vue": "^3.2.0" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + }, + "vue": { + "optional": true + } + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.1.tgz", @@ -2648,6 +9696,53 @@ "node": ">=0.10.0" } }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/split-on-first": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/split-on-first/-/split-on-first-3.0.0.tgz", + "integrity": "sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "license": "MIT", + "dependencies": { + "extend-shallow": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/split-string/node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==", + "license": "MIT", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/ssf": { "version": "0.11.2", "resolved": "https://registry.npmmirror.com/ssf/-/ssf-0.11.2.tgz", @@ -2660,6 +9755,26 @@ "node": ">=0.8" } }, + "node_modules/string-convert": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz", + "integrity": "sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A==", + "license": "MIT" + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/strip-ansi": { "version": "7.1.2", "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.1.2.tgz", @@ -2676,6 +9791,30 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, "node_modules/sucrase": { "version": "3.35.1", "resolved": "https://registry.npmmirror.com/sucrase/-/sucrase-3.35.1.tgz", @@ -2703,7 +9842,6 @@ "version": "1.0.0", "resolved": "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -2712,6 +9850,25 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/swr": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/swr/-/swr-2.3.8.tgz", + "integrity": "sha512-gaCPRVoMq8WGDcWj9p4YWzCMPHzE0WNl6W8ADIx9c3JBEIdMkJGMzW+uzXvxHMltwcYACr9jP+32H8/hgwMR7w==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.3", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "react": "^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/tabbable": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.4.0.tgz", + "integrity": "sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg==", + "license": "MIT" + }, "node_modules/tailwindcss": { "version": "3.4.19", "resolved": "https://registry.npmmirror.com/tailwindcss/-/tailwindcss-3.4.19.tgz", @@ -2773,6 +9930,15 @@ "node": ">=0.8" } }, + "node_modules/throttle-debounce": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-5.0.2.tgz", + "integrity": "sha512-B71/4oyj61iNH0KeCamLuE2rmKuTO5byTOSVwECM5FA7TiAiAW+UqTKZ9ERueC4qvgSttUhdmq1mXC3kJqGX7A==", + "license": "MIT", + "engines": { + "node": ">=12.22" + } + }, "node_modules/tiny-invariant": { "version": "1.3.3", "resolved": "https://registry.npmmirror.com/tiny-invariant/-/tiny-invariant-1.3.3.tgz", @@ -2780,6 +9946,15 @@ "dev": true, "license": "MIT" }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmmirror.com/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -2821,6 +9996,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2841,6 +10017,48 @@ "node": ">=8.0" } }, + "node_modules/to-vfile": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-8.0.0.tgz", + "integrity": "sha512-IcmH1xB5576MJc9qcfEC/m/nQCFt3fzMHz45sSlgJyTWjRbKW1HAkJpuf3DgE57YzIlZcwcBZA5ENQbBo4aLkg==", + "license": "MIT", + "dependencies": { + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, "node_modules/ts-interface-checker": { "version": "0.1.13", "resolved": "https://registry.npmmirror.com/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", @@ -2848,12 +10066,28 @@ "dev": true, "license": "Apache-2.0" }, + "node_modules/ts-md5": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ts-md5/-/ts-md5-2.0.1.tgz", + "integrity": "sha512-yF35FCoEOFBzOclSkMNEUbFQZuv89KEQ+5Xz03HrMSGUGB1+r+El+JiGOFwsP4p9RFNzwlrydYoTLvPOuICl9w==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, "node_modules/typescript": { "version": "5.6.3", "resolved": "https://registry.npmmirror.com/typescript/-/typescript-5.6.3.tgz", "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -2862,6 +10096,12 @@ "node": ">=14.17" } }, + "node_modules/ufo": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.1.tgz", + "integrity": "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==", + "license": "MIT" + }, "node_modules/undici-types": { "version": "6.21.0", "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-6.21.0.tgz", @@ -2882,6 +10122,135 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "peer": true, + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-find-after": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", + "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", + "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/update-browserslist-db": { "version": "1.2.3", "resolved": "https://registry.npmmirror.com/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", @@ -2913,6 +10282,33 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/url-join": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-5.0.0.tgz", + "integrity": "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/use-merge-value": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-merge-value/-/use-merge-value-1.2.0.tgz", + "integrity": "sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==", + "license": "MIT", + "peerDependencies": { + "react": ">= 16.x" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -2920,12 +10316,74 @@ "dev": true, "license": "MIT" }, + "node_modules/uuid": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-13.0.0.tgz", + "integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist-node/bin/uuid" + } + }, + "node_modules/v8n": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/v8n/-/v8n-1.5.1.tgz", + "integrity": "sha512-LdabyT4OffkyXFCe9UT+uMkxNBs5rcTVuZClvxQr08D5TUgo1OFKkoT65qYRCsiKBl/usHjpXvP4hHMzzDRj3A==", + "license": "MIT" + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/vite": { "version": "5.4.21", "resolved": "https://registry.npmmirror.com/vite/-/vite-5.4.21.tgz", "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -3085,6 +10543,49 @@ "url": "https://paulmillr.com/funding/" } }, + "node_modules/vscode-jsonrpc": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", + "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/vscode-languageserver": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", + "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", + "license": "MIT", + "dependencies": { + "vscode-languageserver-protocol": "3.17.5" + }, + "bin": { + "installServerIntoExtension": "bin/installServerIntoExtension" + } + }, + "node_modules/vscode-languageserver-protocol": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", + "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", + "license": "MIT", + "dependencies": { + "vscode-jsonrpc": "8.2.0", + "vscode-languageserver-types": "3.17.5" + } + }, + "node_modules/vscode-languageserver-textdocument": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz", + "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==", + "license": "MIT" + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==", + "license": "MIT" + }, "node_modules/vscode-uri": { "version": "3.1.0", "resolved": "https://registry.npmmirror.com/vscode-uri/-/vscode-uri-3.1.0.tgz", @@ -3097,6 +10598,7 @@ "resolved": "https://registry.npmmirror.com/vue/-/vue-3.5.25.tgz", "integrity": "sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==", "license": "MIT", + "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/compiler-sfc": "3.5.25", @@ -3190,6 +10692,7 @@ "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@volar/typescript": "2.4.15", "@vue/language-core": "2.2.12" @@ -3201,6 +10704,16 @@ "typescript": ">=5.0.0" } }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/wmf": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/wmf/-/wmf-1.0.2.tgz", @@ -3239,6 +10752,33 @@ "engines": { "node": ">=0.8" } + }, + "node_modules/zustand": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz", + "integrity": "sha512-PIJDIZKtokhof+9+60cpockVOq05sJzHCriyvaLBmEJixseQ1a5Kdov6fWZfWOu5SK9c+FhH1jU0tntLxRJYMA==", + "license": "MIT", + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "react": { + "optional": true + } + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } } } } diff --git a/frontend/package.json b/frontend/package.json index e4c047d5..6b0c6b86 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -11,6 +11,7 @@ "typecheck": "vue-tsc --noEmit" }, "dependencies": { + "@lobehub/icons": "^4.0.2", "@vueuse/core": "^10.7.0", "axios": "^1.6.2", "chart.js": "^4.4.1", @@ -25,6 +26,7 @@ }, "devDependencies": { "@types/file-saver": "^2.0.7", + "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", "@vitejs/plugin-vue": "^5.2.3", "autoprefixer": "^10.4.16", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 731e5914..4a4073dd 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -20,6 +20,9 @@ importers: driver.js: specifier: ^1.4.0 version: 1.4.0 + file-saver: + specifier: ^2.0.5 + version: 2.0.5 pinia: specifier: ^2.1.7 version: 2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3)) @@ -35,7 +38,13 @@ importers: vue-router: specifier: ^4.2.5 version: 4.6.4(vue@3.5.26(typescript@5.6.3)) + xlsx: + specifier: ^0.18.5 + version: 0.18.5 devDependencies: + '@types/file-saver': + specifier: ^2.0.7 + version: 2.0.7 '@types/node': specifier: ^20.10.5 version: 20.19.27 @@ -303,67 +312,56 @@ packages: resolution: {integrity: sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==} cpu: [arm] os: [linux] - libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.54.0': resolution: {integrity: sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==} cpu: [arm] os: [linux] - libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.54.0': resolution: {integrity: sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==} cpu: [arm64] os: [linux] - libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.54.0': resolution: {integrity: sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==} cpu: [arm64] os: [linux] - libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.54.0': resolution: {integrity: sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==} cpu: [loong64] os: [linux] - libc: [glibc] '@rollup/rollup-linux-ppc64-gnu@4.54.0': resolution: {integrity: sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==} cpu: [ppc64] os: [linux] - libc: [glibc] '@rollup/rollup-linux-riscv64-gnu@4.54.0': resolution: {integrity: sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==} cpu: [riscv64] os: [linux] - libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.54.0': resolution: {integrity: sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==} cpu: [riscv64] os: [linux] - libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.54.0': resolution: {integrity: sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==} cpu: [s390x] os: [linux] - libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.54.0': resolution: {integrity: sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==} cpu: [x64] os: [linux] - libc: [glibc] '@rollup/rollup-linux-x64-musl@4.54.0': resolution: {integrity: sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==} cpu: [x64] os: [linux] - libc: [musl] '@rollup/rollup-openharmony-arm64@4.54.0': resolution: {integrity: sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==} @@ -393,6 +391,9 @@ packages: '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/file-saver@2.0.7': + resolution: {integrity: sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==} + '@types/node@20.19.27': resolution: {integrity: sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==} @@ -467,6 +468,10 @@ packages: '@vueuse/shared@10.11.1': resolution: {integrity: sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==} + adler-32@1.3.1: + resolution: {integrity: sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==} + engines: {node: '>=0.8'} + alien-signals@1.0.13: resolution: {integrity: sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==} @@ -531,6 +536,10 @@ packages: caniuse-lite@1.0.30001761: resolution: {integrity: sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==} + cfb@1.2.2: + resolution: {integrity: sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==} + engines: {node: '>=0.8'} + chart.js@4.5.1: resolution: {integrity: sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==} engines: {pnpm: '>=8'} @@ -543,6 +552,10 @@ packages: resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} + codepage@1.15.0: + resolution: {integrity: sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==} + engines: {node: '>=0.8'} + combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -551,6 +564,11 @@ packages: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} + crc-32@1.2.2: + resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==} + engines: {node: '>=0.8'} + hasBin: true + cssesc@3.0.0: resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} engines: {node: '>=4'} @@ -630,6 +648,9 @@ packages: picomatch: optional: true + file-saver@2.0.5: + resolution: {integrity: sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==} + fill-range@7.1.1: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} @@ -647,6 +668,10 @@ packages: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} + frac@1.1.2: + resolution: {integrity: sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==} + engines: {node: '>=0.8'} + fraction.js@5.3.4: resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==} @@ -908,6 +933,10 @@ packages: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} + ssf@0.11.2: + resolution: {integrity: sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==} + engines: {node: '>=0.8'} + strip-ansi@7.1.2: resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} engines: {node: '>=12'} @@ -1078,6 +1107,19 @@ packages: typescript: optional: true + wmf@1.0.2: + resolution: {integrity: sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==} + engines: {node: '>=0.8'} + + word@0.3.0: + resolution: {integrity: sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==} + engines: {node: '>=0.8'} + + xlsx@0.18.5: + resolution: {integrity: sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==} + engines: {node: '>=0.8'} + hasBin: true + snapshots: '@alloc/quick-lru@5.2.0': {} @@ -1278,6 +1320,8 @@ snapshots: '@types/estree@1.0.8': {} + '@types/file-saver@2.0.7': {} + '@types/node@20.19.27': dependencies: undici-types: 6.21.0 @@ -1394,6 +1438,8 @@ snapshots: - '@vue/composition-api' - vue + adler-32@1.3.1: {} + alien-signals@1.0.13: {} ansi-regex@6.2.2: {} @@ -1457,6 +1503,11 @@ snapshots: caniuse-lite@1.0.30001761: {} + cfb@1.2.2: + dependencies: + adler-32: 1.3.1 + crc-32: 1.2.2 + chart.js@4.5.1: dependencies: '@kurkle/color': 0.3.4 @@ -1477,12 +1528,16 @@ snapshots: dependencies: readdirp: 4.1.2 + codepage@1.15.0: {} + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 commander@4.1.1: {} + crc-32@1.2.2: {} + cssesc@3.0.0: {} csstype@3.2.3: {} @@ -1568,6 +1623,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + file-saver@2.0.5: {} + fill-range@7.1.1: dependencies: to-regex-range: 5.0.1 @@ -1582,6 +1639,8 @@ snapshots: hasown: 2.0.2 mime-types: 2.1.35 + frac@1.1.2: {} + fraction.js@5.3.4: {} fsevents@2.3.3: @@ -1818,6 +1877,10 @@ snapshots: source-map-js@1.2.1: {} + ssf@0.11.2: + dependencies: + frac: 1.1.2 + strip-ansi@7.1.2: dependencies: ansi-regex: 6.2.2 @@ -1960,3 +2023,17 @@ snapshots: '@vue/shared': 3.5.26 optionalDependencies: typescript: 5.6.3 + + wmf@1.0.2: {} + + word@0.3.0: {} + + xlsx@0.18.5: + dependencies: + adler-32: 1.3.1 + cfb: 1.2.2 + codepage: 1.15.0 + crc-32: 1.2.2 + ssf: 0.11.2 + wmf: 1.0.2 + word: 0.3.0 diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index bb1f0f81..916e1b6a 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -666,47 +666,7 @@
-
-

- - - - {{ t('admin.accounts.selectAllowedModels') }} -

-
- - -
- -
- +

{{ t('admin.accounts.selectedModels', { count: allowedModels.length }) }} {{ @@ -1176,6 +1136,7 @@ import { ref, reactive, computed, watch } from 'vue' import { useI18n } from 'vue-i18n' import { useAppStore } from '@/stores/app' +import { claudeModels, getPresetMappingsByPlatform, getModelsByPlatform, commonErrorCodes, buildModelMappingObject } from '@/composables/useModelWhitelist' import { useAuthStore } from '@/stores/auth' import { adminAPI } from '@/api/admin' import { @@ -1190,6 +1151,7 @@ import type { Proxy, Group, AccountPlatform, AccountType } from '@/types' import BaseDialog from '@/components/common/BaseDialog.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' +import ModelWhitelistSelector from '@/components/account/ModelWhitelistSelector.vue' import OAuthAuthorizationFlow from './OAuthAuthorizationFlow.vue' // Type for exposed OAuthAuthorizationFlow component @@ -1302,178 +1264,8 @@ const mixedScheduling = ref(false) // For antigravity accounts: enable mixed sch const geminiOAuthType = ref<'code_assist' | 'ai_studio'>('code_assist') const geminiAIStudioOAuthEnabled = ref(false) -// Common models for whitelist - Anthropic -const anthropicModels = [ - { value: 'claude-opus-4-5-20251101', label: 'Claude Opus 4.5' }, - { value: 'claude-sonnet-4-20250514', label: 'Claude Sonnet 4' }, - { value: 'claude-sonnet-4-5-20250929', label: 'Claude Sonnet 4.5' }, - { value: 'claude-3-5-haiku-20241022', label: 'Claude 3.5 Haiku' }, - { value: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5' }, - { value: 'claude-3-opus-20240229', label: 'Claude 3 Opus' }, - { value: 'claude-3-5-sonnet-20241022', label: 'Claude 3.5 Sonnet' }, - { value: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku' } -] - -// Common models for whitelist - OpenAI -const openaiModels = [ - { value: 'gpt-5.2-2025-12-11', label: 'GPT-5.2' }, - { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex' }, - { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max' }, - { value: 'gpt-5.1-codex', label: 'GPT-5.1 Codex' }, - { value: 'gpt-5.1-2025-11-13', label: 'GPT-5.1' }, - { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, - { value: 'gpt-5-2025-08-07', label: 'GPT-5' } -] - -// Common models for whitelist - Gemini -const geminiModels = [ - { value: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' }, - { value: 'gemini-2.0-flash-lite', label: 'Gemini 2.0 Flash Lite' }, - { value: 'gemini-1.5-pro', label: 'Gemini 1.5 Pro' }, - { value: 'gemini-1.5-flash', label: 'Gemini 1.5 Flash' } -] - -// Computed: current models based on platform -const commonModels = computed(() => { - if (form.platform === 'openai') return openaiModels - if (form.platform === 'gemini') return geminiModels - return anthropicModels -}) - -// Preset mappings for quick add - Anthropic -const anthropicPresetMappings = [ - { - label: 'Sonnet 4', - from: 'claude-sonnet-4-20250514', - to: 'claude-sonnet-4-20250514', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'Sonnet 4.5', - from: 'claude-sonnet-4-5-20250929', - to: 'claude-sonnet-4-5-20250929', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: 'Opus 4.5', - from: 'claude-opus-4-5-20251101', - to: 'claude-opus-4-5-20251101', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: 'Haiku 3.5', - from: 'claude-3-5-haiku-20241022', - to: 'claude-3-5-haiku-20241022', - color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' - }, - { - label: 'Haiku 4.5', - from: 'claude-haiku-4-5-20251001', - to: 'claude-haiku-4-5-20251001', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - }, - { - label: 'Opus->Sonnet', - from: 'claude-opus-4-5-20251101', - to: 'claude-sonnet-4-5-20250929', - color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' - } -] - -// Preset mappings for quick add - OpenAI -const openaiPresetMappings = [ - { - label: 'GPT-5.2', - from: 'gpt-5.2-2025-12-11', - to: 'gpt-5.2-2025-12-11', - color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' - }, - { - label: 'GPT-5.2 Codex', - from: 'gpt-5.2-codex', - to: 'gpt-5.2-codex', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'GPT-5.1 Codex', - from: 'gpt-5.1-codex', - to: 'gpt-5.1-codex', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: 'Codex Max', - from: 'gpt-5.1-codex-max', - to: 'gpt-5.1-codex-max', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: 'Codex Mini', - from: 'gpt-5.1-codex-mini', - to: 'gpt-5.1-codex-mini', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - }, - { - label: 'Max->Codex', - from: 'gpt-5.1-codex-max', - to: 'gpt-5.1-codex', - color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' - } -] - -// Preset mappings for quick add - Gemini -const geminiPresetMappings = [ - { - label: 'Flash', - from: 'gemini-2.0-flash', - to: 'gemini-2.0-flash', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'Flash Lite', - from: 'gemini-2.0-flash-lite', - to: 'gemini-2.0-flash-lite', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: '1.5 Pro', - from: 'gemini-1.5-pro', - to: 'gemini-1.5-pro', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: '1.5 Flash', - from: 'gemini-1.5-flash', - to: 'gemini-1.5-flash', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - } -] - // Computed: current preset mappings based on platform -const presetMappings = computed(() => { - if (form.platform === 'openai') return openaiPresetMappings - if (form.platform === 'gemini') return geminiPresetMappings - return anthropicPresetMappings -}) - -// Common HTTP error codes for quick selection -const commonErrorCodes = [ - { value: 401, label: 'Unauthorized' }, - { value: 403, label: 'Forbidden' }, - { value: 429, label: 'Rate Limit' }, - { value: 500, label: 'Server Error' }, - { value: 502, label: 'Bad Gateway' }, - { value: 503, label: 'Unavailable' }, - { value: 529, label: 'Overloaded' } -] +const presetMappings = computed(() => getPresetMappingsByPlatform(form.platform)) const form = reactive({ name: '', @@ -1511,7 +1303,10 @@ const canExchangeCode = computed(() => { watch( () => props.show, (newVal) => { - if (!newVal) { + if (newVal) { + // Modal opened - fill related models + allowedModels.value = [...getModelsByPlatform(form.platform)] + } else { resetForm() } } @@ -1585,6 +1380,16 @@ const handleSelectGeminiOAuthType = (oauthType: 'code_assist' | 'ai_studio') => geminiOAuthType.value = oauthType } +// Auto-fill related models when switching to whitelist mode or changing platform +watch( + [modelRestrictionMode, () => form.platform], + ([newMode]) => { + if (newMode === 'whitelist') { + allowedModels.value = [...getModelsByPlatform(form.platform)] + } + } +) + // Model mapping helpers const addModelMapping = () => { modelMappings.value.push({ from: '', to: '' }) @@ -1595,9 +1400,7 @@ const removeModelMapping = (index: number) => { } const addPresetMapping = (from: string, to: string) => { - // Check if mapping already exists - const exists = modelMappings.value.some((m) => m.from === from) - if (exists) { + if (modelMappings.value.some((m) => m.from === from)) { appStore.showInfo(t('admin.accounts.mappingExists', { model: from })) return } @@ -1637,28 +1440,6 @@ const removeErrorCode = (code: number) => { } } -const buildModelMappingObject = (): Record | null => { - const mapping: Record = {} - - if (modelRestrictionMode.value === 'whitelist') { - // Whitelist mode: map model to itself - for (const model of allowedModels.value) { - mapping[model] = model - } - } else { - // Mapping mode: use custom mappings - for (const m of modelMappings.value) { - const from = m.from.trim() - const to = m.to.trim() - if (from && to) { - mapping[from] = to - } - } - } - - return Object.keys(mapping).length > 0 ? mapping : null -} - // Methods const resetForm = () => { step.value = 1 @@ -1676,7 +1457,7 @@ const resetForm = () => { apiKeyValue.value = '' modelMappings.value = [] modelRestrictionMode.value = 'whitelist' - allowedModels.value = [] + allowedModels.value = [...claudeModels] // Default fill related models customErrorCodesEnabled.value = false selectedErrorCodes.value = [] customErrorCodeInput.value = null @@ -1725,7 +1506,7 @@ const handleSubmit = async () => { } // Add model mapping if configured - const modelMapping = buildModelMappingObject() + const modelMapping = buildModelMappingObject(modelRestrictionMode.value, allowedModels.value, modelMappings.value) if (modelMapping) { credentials.model_mapping = modelMapping } diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 32a36c77..39280e2c 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -111,47 +111,7 @@

-
-

- - - - {{ t('admin.accounts.selectAllowedModels') }} -

-
- - -
- -
- +

{{ t('admin.accounts.selectedModels', { count: allowedModels.length }) }} {{ @@ -565,6 +525,12 @@ import BaseDialog from '@/components/common/BaseDialog.vue' import Select from '@/components/common/Select.vue' import ProxySelector from '@/components/common/ProxySelector.vue' import GroupSelector from '@/components/common/GroupSelector.vue' +import ModelWhitelistSelector from '@/components/account/ModelWhitelistSelector.vue' +import { + getPresetMappingsByPlatform, + commonErrorCodes, + buildModelMappingObject +} from '@/composables/useModelWhitelist' interface Props { show: boolean @@ -610,167 +576,8 @@ const customErrorCodeInput = ref(null) const interceptWarmupRequests = ref(false) const mixedScheduling = ref(false) // For antigravity accounts: enable mixed scheduling -// Common models for whitelist - Anthropic -const anthropicModels = [ - { value: 'claude-opus-4-5-20251101', label: 'Claude Opus 4.5' }, - { value: 'claude-sonnet-4-20250514', label: 'Claude Sonnet 4' }, - { value: 'claude-sonnet-4-5-20250929', label: 'Claude Sonnet 4.5' }, - { value: 'claude-3-5-haiku-20241022', label: 'Claude 3.5 Haiku' }, - { value: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5' }, - { value: 'claude-3-opus-20240229', label: 'Claude 3 Opus' }, - { value: 'claude-3-5-sonnet-20241022', label: 'Claude 3.5 Sonnet' }, - { value: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku' } -] - -// Common models for whitelist - OpenAI -const openaiModels = [ - { value: 'gpt-5.2-2025-12-11', label: 'GPT-5.2' }, - { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex' }, - { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max' }, - { value: 'gpt-5.1-codex', label: 'GPT-5.1 Codex' }, - { value: 'gpt-5.1-2025-11-13', label: 'GPT-5.1' }, - { value: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini' }, - { value: 'gpt-5-2025-08-07', label: 'GPT-5' } -] - -// Common models for whitelist - Gemini -const geminiModels = [ - { value: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' }, - { value: 'gemini-2.0-flash-lite', label: 'Gemini 2.0 Flash Lite' }, - { value: 'gemini-1.5-pro', label: 'Gemini 1.5 Pro' }, - { value: 'gemini-1.5-flash', label: 'Gemini 1.5 Flash' } -] - -// Computed: current models based on platform -const commonModels = computed(() => { - if (props.account?.platform === 'openai') return openaiModels - if (props.account?.platform === 'gemini') return geminiModels - return anthropicModels -}) - -// Preset mappings for quick add - Anthropic -const anthropicPresetMappings = [ - { - label: 'Sonnet 4', - from: 'claude-sonnet-4-20250514', - to: 'claude-sonnet-4-20250514', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'Sonnet 4.5', - from: 'claude-sonnet-4-5-20250929', - to: 'claude-sonnet-4-5-20250929', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: 'Opus 4.5', - from: 'claude-opus-4-5-20251101', - to: 'claude-opus-4-5-20251101', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: 'Haiku 3.5', - from: 'claude-3-5-haiku-20241022', - to: 'claude-3-5-haiku-20241022', - color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' - }, - { - label: 'Haiku 4.5', - from: 'claude-haiku-4-5-20251001', - to: 'claude-haiku-4-5-20251001', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - }, - { - label: 'Opus->Sonnet', - from: 'claude-opus-4-5-20251101', - to: 'claude-sonnet-4-5-20250929', - color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' - } -] - -// Preset mappings for quick add - OpenAI -const openaiPresetMappings = [ - { - label: 'GPT-5.2', - from: 'gpt-5.2-2025-12-11', - to: 'gpt-5.2-2025-12-11', - color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' - }, - { - label: 'GPT-5.2 Codex', - from: 'gpt-5.2-codex', - to: 'gpt-5.2-codex', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'GPT-5.1 Codex', - from: 'gpt-5.1-codex', - to: 'gpt-5.1-codex', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: 'Codex Max', - from: 'gpt-5.1-codex-max', - to: 'gpt-5.1-codex-max', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: 'Codex Mini', - from: 'gpt-5.1-codex-mini', - to: 'gpt-5.1-codex-mini', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - }, - { - label: 'Max->Codex', - from: 'gpt-5.1-codex-max', - to: 'gpt-5.1-codex', - color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' - } -] - -// Preset mappings for quick add - Gemini -const geminiPresetMappings = [ - { - label: 'Flash', - from: 'gemini-2.0-flash', - to: 'gemini-2.0-flash', - color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' - }, - { - label: 'Flash Lite', - from: 'gemini-2.0-flash-lite', - to: 'gemini-2.0-flash-lite', - color: - 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' - }, - { - label: '1.5 Pro', - from: 'gemini-1.5-pro', - to: 'gemini-1.5-pro', - color: - 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' - }, - { - label: '1.5 Flash', - from: 'gemini-1.5-flash', - to: 'gemini-1.5-flash', - color: - 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' - } -] - // Computed: current preset mappings based on platform -const presetMappings = computed(() => { - if (props.account?.platform === 'openai') return openaiPresetMappings - if (props.account?.platform === 'gemini') return geminiPresetMappings - return anthropicPresetMappings -}) +const presetMappings = computed(() => getPresetMappingsByPlatform(props.account?.platform || 'anthropic')) // Computed: default base URL based on platform const defaultBaseUrl = computed(() => { @@ -779,17 +586,6 @@ const defaultBaseUrl = computed(() => { return 'https://api.anthropic.com' }) -// Common HTTP error codes for quick selection -const commonErrorCodes = [ - { value: 401, label: 'Unauthorized' }, - { value: 403, label: 'Forbidden' }, - { value: 429, label: 'Rate Limit' }, - { value: 500, label: 'Server Error' }, - { value: 502, label: 'Bad Gateway' }, - { value: 503, label: 'Unavailable' }, - { value: 529, label: 'Overloaded' } -] - const form = reactive({ name: '', proxy_id: null as number | null, @@ -940,28 +736,6 @@ const removeErrorCode = (code: number) => { } } -const buildModelMappingObject = (): Record | null => { - const mapping: Record = {} - - if (modelRestrictionMode.value === 'whitelist') { - // Whitelist mode: model maps to itself - for (const model of allowedModels.value) { - mapping[model] = model - } - } else { - // Mapping mode: use the mapping entries - for (const m of modelMappings.value) { - const from = m.from.trim() - const to = m.to.trim() - if (from && to) { - mapping[from] = to - } - } - } - - return Object.keys(mapping).length > 0 ? mapping : null -} - // Methods const handleClose = () => { emit('close') @@ -978,7 +752,7 @@ const handleSubmit = async () => { if (props.account.type === 'apikey') { const currentCredentials = (props.account.credentials as Record) || {} const newBaseUrl = editBaseUrl.value.trim() || defaultBaseUrl.value - const modelMapping = buildModelMappingObject() + const modelMapping = buildModelMappingObject(modelRestrictionMode.value, allowedModels.value, modelMappings.value) // Always update credentials for apikey type to handle model mapping changes const newCredentials: Record = { diff --git a/frontend/src/components/account/ModelWhitelistSelector.vue b/frontend/src/components/account/ModelWhitelistSelector.vue new file mode 100644 index 00000000..b029d376 --- /dev/null +++ b/frontend/src/components/account/ModelWhitelistSelector.vue @@ -0,0 +1,201 @@ + + + diff --git a/frontend/src/components/common/ModelIcon.vue b/frontend/src/components/common/ModelIcon.vue new file mode 100644 index 00000000..2a05bf71 --- /dev/null +++ b/frontend/src/components/common/ModelIcon.vue @@ -0,0 +1,278 @@ + + + + + diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts new file mode 100644 index 00000000..d18bdc5f --- /dev/null +++ b/frontend/src/composables/useModelWhitelist.ts @@ -0,0 +1,299 @@ +// ===================== +// 模型列表(硬编码,与 new-api 一致) +// ===================== + +// OpenAI +const openaiModels = [ + 'gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-1106', 'gpt-3.5-turbo-16k', + 'gpt-4', 'gpt-4-turbo', 'gpt-4-turbo-preview', + 'gpt-4o', 'gpt-4o-2024-08-06', 'gpt-4o-2024-11-20', + 'gpt-4o-mini', 'gpt-4o-mini-2024-07-18', + 'gpt-4.5-preview', + 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano', + 'o1', 'o1-preview', 'o1-mini', 'o1-pro', + 'o3', 'o3-mini', 'o3-pro', + 'o4-mini', + 'gpt-5', 'gpt-5-mini', 'gpt-5-nano', + 'chatgpt-4o-latest', + 'gpt-4o-audio-preview', 'gpt-4o-realtime-preview' +] + +// Anthropic Claude +export const claudeModels = [ + 'claude-3-5-sonnet-20241022', 'claude-3-5-sonnet-20240620', + 'claude-3-5-haiku-20241022', + 'claude-3-opus-20240229', 'claude-3-sonnet-20240229', 'claude-3-haiku-20240307', + 'claude-3-7-sonnet-20250219', + 'claude-sonnet-4-20250514', 'claude-opus-4-20250514', + 'claude-opus-4-1-20250805', + 'claude-sonnet-4-5-20250929', 'claude-haiku-4-5-20251001', + 'claude-opus-4-5-20251101', + 'claude-2.1', 'claude-2.0', 'claude-instant-1.2' +] + +// Google Gemini +const geminiModels = [ + 'gemini-2.0-flash', 'gemini-2.0-flash-lite-preview', 'gemini-2.0-flash-exp', + 'gemini-2.0-pro-exp', 'gemini-2.0-flash-thinking-exp', + 'gemini-2.5-pro-exp-03-25', 'gemini-2.5-pro-preview-03-25', + 'gemini-3-pro-preview', + 'gemini-1.5-pro', 'gemini-1.5-pro-latest', + 'gemini-1.5-flash', 'gemini-1.5-flash-latest', 'gemini-1.5-flash-8b', + 'gemini-exp-1206' +] + +// 智谱 GLM +const zhipuModels = [ + 'glm-4', 'glm-4v', 'glm-4-plus', 'glm-4-0520', + 'glm-4-air', 'glm-4-airx', 'glm-4-long', 'glm-4-flash', + 'glm-4v-plus', 'glm-4.5', 'glm-4.6', + 'glm-3-turbo', 'glm-4-alltools', + 'chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite', + 'cogview-3', 'cogvideo' +] + +// 阿里 通义千问 +const qwenModels = [ + 'qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-longcontext', 'qwen-long', + 'qwen2-72b-instruct', 'qwen2-57b-a14b-instruct', 'qwen2-7b-instruct', + 'qwen2.5-72b-instruct', 'qwen2.5-32b-instruct', 'qwen2.5-14b-instruct', + 'qwen2.5-7b-instruct', 'qwen2.5-3b-instruct', 'qwen2.5-1.5b-instruct', + 'qwen2.5-coder-32b-instruct', 'qwen2.5-coder-14b-instruct', 'qwen2.5-coder-7b-instruct', + 'qwen3-235b-a22b', + 'qwq-32b', 'qwq-32b-preview' +] + +// DeepSeek +const deepseekModels = [ + 'deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', + 'deepseek-v3', 'deepseek-v3-0324', + 'deepseek-r1', 'deepseek-r1-0528', + 'deepseek-r1-distill-qwen-32b', 'deepseek-r1-distill-qwen-14b', 'deepseek-r1-distill-qwen-7b', + 'deepseek-r1-distill-llama-70b', 'deepseek-r1-distill-llama-8b' +] + +// Mistral +const mistralModels = [ + 'mistral-small-latest', 'mistral-medium-latest', 'mistral-large-latest', + 'open-mistral-7b', 'open-mixtral-8x7b', 'open-mixtral-8x22b', + 'codestral-latest', 'codestral-mamba', + 'pixtral-12b-2409', 'pixtral-large-latest' +] + +// Meta Llama +const metaModels = [ + 'llama-3.3-70b-instruct', + 'llama-3.2-90b-vision-instruct', 'llama-3.2-11b-vision-instruct', + 'llama-3.2-3b-instruct', 'llama-3.2-1b-instruct', + 'llama-3.1-405b-instruct', 'llama-3.1-70b-instruct', 'llama-3.1-8b-instruct', + 'llama-3-70b-instruct', 'llama-3-8b-instruct', + 'codellama-70b-instruct', 'codellama-34b-instruct', 'codellama-13b-instruct' +] + +// xAI Grok +const xaiModels = [ + 'grok-4', 'grok-4-0709', + 'grok-3-beta', 'grok-3-mini-beta', 'grok-3-fast-beta', + 'grok-2', 'grok-2-vision', 'grok-2-image', + 'grok-beta', 'grok-vision-beta' +] + +// Cohere +const cohereModels = [ + 'command-a-03-2025', + 'command-r', 'command-r-plus', + 'command-r-08-2024', 'command-r-plus-08-2024', + 'c4ai-aya-23-35b', 'c4ai-aya-23-8b', + 'command', 'command-light' +] + +// Yi (01.AI) +const yiModels = [ + 'yi-large', 'yi-large-turbo', 'yi-large-rag', + 'yi-medium', 'yi-medium-200k', + 'yi-spark', 'yi-vision', + 'yi-1.5-34b-chat', 'yi-1.5-9b-chat', 'yi-1.5-6b-chat' +] + +// Moonshot/Kimi +const moonshotModels = [ + 'moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k', + 'kimi-latest' +] + +// 字节跳动 豆包 +const doubaoModels = [ + 'doubao-pro-256k', 'doubao-pro-128k', 'doubao-pro-32k', 'doubao-pro-4k', + 'doubao-lite-128k', 'doubao-lite-32k', 'doubao-lite-4k', + 'doubao-vision-pro-32k', 'doubao-vision-lite-32k', + 'doubao-1.5-pro-256k', 'doubao-1.5-pro-32k', 'doubao-1.5-lite-32k', + 'doubao-1.5-pro-vision-32k', 'doubao-1.5-thinking-pro' +] + +// MiniMax +const minimaxModels = [ + 'abab6.5-chat', 'abab6.5s-chat', 'abab6.5s-chat-pro', + 'abab6-chat', + 'abab5.5-chat', 'abab5.5s-chat' +] + +// 百度 文心 +const baiduModels = [ + 'ernie-4.0-8k-latest', 'ernie-4.0-8k', 'ernie-4.0-turbo-8k', + 'ernie-3.5-8k', 'ernie-3.5-128k', + 'ernie-speed-8k', 'ernie-speed-128k', 'ernie-speed-pro-128k', + 'ernie-lite-8k', 'ernie-lite-pro-128k', + 'ernie-tiny-8k' +] + +// 讯飞 星火 +const sparkModels = [ + 'spark-desk', 'spark-desk-v1.1', 'spark-desk-v2.1', + 'spark-desk-v3.1', 'spark-desk-v3.5', 'spark-desk-v4.0', + 'spark-lite', 'spark-pro', 'spark-max', 'spark-ultra' +] + +// 腾讯 混元 +const hunyuanModels = [ + 'hunyuan-lite', 'hunyuan-standard', 'hunyuan-standard-256k', + 'hunyuan-pro', 'hunyuan-turbo', 'hunyuan-large', + 'hunyuan-vision', 'hunyuan-code' +] + +// Perplexity +const perplexityModels = [ + 'sonar', 'sonar-pro', 'sonar-reasoning', + 'llama-3-sonar-small-32k-online', 'llama-3-sonar-large-32k-online', + 'llama-3-sonar-small-32k-chat', 'llama-3-sonar-large-32k-chat' +] + +// 所有模型(去重) +const allModelsList: string[] = [ + ...openaiModels, + ...claudeModels, + ...geminiModels, + ...zhipuModels, + ...qwenModels, + ...deepseekModels, + ...mistralModels, + ...metaModels, + ...xaiModels, + ...cohereModels, + ...yiModels, + ...moonshotModels, + ...doubaoModels, + ...minimaxModels, + ...baiduModels, + ...sparkModels, + ...hunyuanModels, + ...perplexityModels +] + +// 转换为下拉选项格式 +export const allModels = allModelsList.map(m => ({ value: m, label: m })) + +// ===================== +// 预设映射 +// ===================== + +const anthropicPresetMappings = [ + { label: 'Sonnet 4', from: 'claude-sonnet-4-20250514', to: 'claude-sonnet-4-20250514', color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' }, + { label: 'Sonnet 4.5', from: 'claude-sonnet-4-5-20250929', to: 'claude-sonnet-4-5-20250929', color: 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' }, + { label: 'Opus 4.5', from: 'claude-opus-4-5-20251101', to: 'claude-opus-4-5-20251101', color: 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' }, + { label: 'Haiku 3.5', from: 'claude-3-5-haiku-20241022', to: 'claude-3-5-haiku-20241022', color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' }, + { label: 'Haiku 4.5', from: 'claude-haiku-4-5-20251001', to: 'claude-haiku-4-5-20251001', color: 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' }, + { label: 'Opus->Sonnet', from: 'claude-opus-4-5-20251101', to: 'claude-sonnet-4-5-20250929', color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' } +] + +const openaiPresetMappings = [ + { label: 'GPT-4o', from: 'gpt-4o', to: 'gpt-4o', color: 'bg-green-100 text-green-700 hover:bg-green-200 dark:bg-green-900/30 dark:text-green-400' }, + { label: 'GPT-4o Mini', from: 'gpt-4o-mini', to: 'gpt-4o-mini', color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' }, + { label: 'GPT-4.1', from: 'gpt-4.1', to: 'gpt-4.1', color: 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' }, + { label: 'o1', from: 'o1', to: 'o1', color: 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' }, + { label: 'o3', from: 'o3', to: 'o3', color: 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' }, + { label: 'GPT-5', from: 'gpt-5', to: 'gpt-5', color: 'bg-amber-100 text-amber-700 hover:bg-amber-200 dark:bg-amber-900/30 dark:text-amber-400' } +] + +const geminiPresetMappings = [ + { label: 'Flash 2.0', from: 'gemini-2.0-flash', to: 'gemini-2.0-flash', color: 'bg-blue-100 text-blue-700 hover:bg-blue-200 dark:bg-blue-900/30 dark:text-blue-400' }, + { label: 'Flash Lite', from: 'gemini-2.0-flash-lite-preview', to: 'gemini-2.0-flash-lite-preview', color: 'bg-indigo-100 text-indigo-700 hover:bg-indigo-200 dark:bg-indigo-900/30 dark:text-indigo-400' }, + { label: '1.5 Pro', from: 'gemini-1.5-pro', to: 'gemini-1.5-pro', color: 'bg-purple-100 text-purple-700 hover:bg-purple-200 dark:bg-purple-900/30 dark:text-purple-400' }, + { label: '1.5 Flash', from: 'gemini-1.5-flash', to: 'gemini-1.5-flash', color: 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' } +] + +// ===================== +// 常用错误码 +// ===================== + +export const commonErrorCodes = [ + { value: 401, label: 'Unauthorized' }, + { value: 403, label: 'Forbidden' }, + { value: 429, label: 'Rate Limit' }, + { value: 500, label: 'Server Error' }, + { value: 502, label: 'Bad Gateway' }, + { value: 503, label: 'Unavailable' }, + { value: 529, label: 'Overloaded' } +] + +// ===================== +// 辅助函数 +// ===================== + +// 按平台获取模型 +export function getModelsByPlatform(platform: string): string[] { + switch (platform) { + case 'openai': return openaiModels + case 'anthropic': + case 'claude': return claudeModels + case 'gemini': return geminiModels + case 'zhipu': return zhipuModels + case 'qwen': return qwenModels + case 'deepseek': return deepseekModels + case 'mistral': return mistralModels + case 'meta': return metaModels + case 'xai': return xaiModels + case 'cohere': return cohereModels + case 'yi': return yiModels + case 'moonshot': return moonshotModels + case 'doubao': return doubaoModels + case 'minimax': return minimaxModels + case 'baidu': return baiduModels + case 'spark': return sparkModels + case 'hunyuan': return hunyuanModels + case 'perplexity': return perplexityModels + default: return claudeModels + } +} + +// 按平台获取预设映射 +export function getPresetMappingsByPlatform(platform: string) { + if (platform === 'openai') return openaiPresetMappings + if (platform === 'gemini') return geminiPresetMappings + return anthropicPresetMappings +} + +// ===================== +// 构建模型映射对象(用于 API) +// ===================== + +export function buildModelMappingObject( + mode: 'whitelist' | 'mapping', + allowedModels: string[], + modelMappings: { from: string; to: string }[] +): Record | null { + const mapping: Record = {} + + if (mode === 'whitelist') { + for (const model of allowedModels) { + mapping[model] = model + } + } else { + for (const m of modelMappings) { + const from = m.from.trim() + const to = m.to.trim() + if (from && to) mapping[from] = to + } + } + + return Object.keys(mapping).length > 0 ? mapping : null +} diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index d153b553..6bfdb54d 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -944,6 +944,15 @@ export default { actualModel: 'Actual model', addMapping: 'Add Mapping', mappingExists: 'Mapping for {model} already exists', + searchModels: 'Search models...', + noMatchingModels: 'No matching models', + fillRelatedModels: 'Fill related models', + clearAllModels: 'Clear all models', + customModelName: 'Custom model name', + enterCustomModelName: 'Enter custom model name', + addModel: 'Add', + modelExists: 'Model already exists', + modelCount: '{count} models', customErrorCodes: 'Custom Error Codes', customErrorCodesHint: 'Only stop scheduling for selected error codes', customErrorCodesWarning: diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index c6105683..218f7de3 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1093,6 +1093,15 @@ export default { actualModel: '实际模型', addMapping: '添加映射', mappingExists: '模型 {model} 的映射已存在', + searchModels: '搜索模型...', + noMatchingModels: '没有匹配的模型', + fillRelatedModels: '填入相关模型', + clearAllModels: '清除所有模型', + customModelName: '自定义模型名称', + enterCustomModelName: '输入自定义模型名称', + addModel: '填入', + modelExists: '该模型已存在', + modelCount: '{count} 个模型', customErrorCodes: '自定义错误码', customErrorCodesHint: '仅对选中的错误码停止调度', customErrorCodesWarning: '仅选中的错误码会停止调度,其他错误将返回 500。', diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json index 4b6a33ba..a1731cfb 100644 --- a/frontend/tsconfig.json +++ b/frontend/tsconfig.json @@ -17,7 +17,8 @@ "noFallthroughCasesInSwitch": true, "paths": { "@/*": ["./src/*"] - } + }, + "types": ["vite/client"] }, "include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"], "references": [{ "path": "./tsconfig.node.json" }] From 3c3fed886fff516ae9c864e043786595397f6966 Mon Sep 17 00:00:00 2001 From: Edric Li Date: Thu, 1 Jan 2026 18:58:34 +0800 Subject: [PATCH 40/51] feat(backend): add user custom attributes system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a flexible user attribute system that allows admins to define custom fields for users (text, textarea, number, email, url, date, select, multi_select types). - Add Ent schemas for UserAttributeDefinition and UserAttributeValue - Add service layer with validation logic - Add repository layer with batch operations support - Add admin API endpoints for CRUD and reorder operations - Add batch API for loading attribute values for multiple users - Add database migration (018_user_attributes.sql) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- backend/cmd/server/wire_gen.go | 12 +- backend/ent/client.go | 412 +++- backend/ent/ent.go | 26 +- backend/ent/hook/hook.go | 24 + backend/ent/intercept/intercept.go | 60 + backend/ent/migrate/schema.go | 97 +- backend/ent/mutation.go | 1970 ++++++++++++++++- backend/ent/predicate/predicate.go | 6 + backend/ent/runtime/runtime.go | 132 +- .../ent/schema/user_attribute_definition.go | 109 + backend/ent/schema/user_attribute_value.go | 74 + backend/ent/tx.go | 6 + backend/ent/userattributedefinition.go | 276 +++ .../userattributedefinition.go | 205 ++ backend/ent/userattributedefinition/where.go | 664 ++++++ backend/ent/userattributedefinition_create.go | 1267 +++++++++++ backend/ent/userattributedefinition_delete.go | 88 + backend/ent/userattributedefinition_query.go | 606 +++++ backend/ent/userattributedefinition_update.go | 846 +++++++ backend/ent/userattributevalue.go | 198 ++ .../userattributevalue/userattributevalue.go | 139 ++ backend/ent/userattributevalue/where.go | 327 +++ backend/ent/userattributevalue_create.go | 731 ++++++ backend/ent/userattributevalue_delete.go | 88 + backend/ent/userattributevalue_query.go | 681 ++++++ backend/ent/userattributevalue_update.go | 504 +++++ .../handler/admin/user_attribute_handler.go | 342 +++ backend/internal/handler/handler.go | 1 + backend/internal/handler/wire.go | 3 + .../repository/user_attribute_repo.go | 387 ++++ backend/internal/repository/wire.go | 2 + backend/internal/server/routes/admin.go | 19 + backend/internal/service/user_attribute.go | 125 ++ .../service/user_attribute_service.go | 295 +++ backend/internal/service/wire.go | 1 + backend/migrations/018_user_attributes.sql | 48 + 36 files changed, 10649 insertions(+), 122 deletions(-) create mode 100644 backend/ent/schema/user_attribute_definition.go create mode 100644 backend/ent/schema/user_attribute_value.go create mode 100644 backend/ent/userattributedefinition.go create mode 100644 backend/ent/userattributedefinition/userattributedefinition.go create mode 100644 backend/ent/userattributedefinition/where.go create mode 100644 backend/ent/userattributedefinition_create.go create mode 100644 backend/ent/userattributedefinition_delete.go create mode 100644 backend/ent/userattributedefinition_query.go create mode 100644 backend/ent/userattributedefinition_update.go create mode 100644 backend/ent/userattributevalue.go create mode 100644 backend/ent/userattributevalue/userattributevalue.go create mode 100644 backend/ent/userattributevalue/where.go create mode 100644 backend/ent/userattributevalue_create.go create mode 100644 backend/ent/userattributevalue_delete.go create mode 100644 backend/ent/userattributevalue_query.go create mode 100644 backend/ent/userattributevalue_update.go create mode 100644 backend/internal/handler/admin/user_attribute_handler.go create mode 100644 backend/internal/repository/user_attribute_repo.go create mode 100644 backend/internal/service/user_attribute.go create mode 100644 backend/internal/service/user_attribute_service.go create mode 100644 backend/migrations/018_user_attributes.sql diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index ad74d184..91569497 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -114,15 +114,19 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { gitHubReleaseClient := repository.NewGitHubReleaseClient() serviceBuildInfo := provideServiceBuildInfo(buildInfo) updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) + systemHandler := handler.ProvideSystemHandler(updateService) + adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) + adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) + userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client) + userAttributeValueRepository := repository.NewUserAttributeValueRepository(client, db) + userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) + userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) pricingRemoteClient := repository.NewPricingRemoteClient() pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) if err != nil { return nil, err } - systemHandler := handler.ProvideSystemHandler(updateService) - adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) - adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) billingService := service.NewBillingService(configConfig, pricingService) identityCache := repository.NewIdentityCache(redisClient) identityService := service.NewIdentityService(identityCache) diff --git a/backend/ent/client.go b/backend/ent/client.go index 909226fa..fab70489 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -25,6 +25,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" stdsql "database/sql" @@ -55,6 +57,10 @@ type Client struct { User *UserClient // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. UserAllowedGroup *UserAllowedGroupClient + // UserAttributeDefinition is the client for interacting with the UserAttributeDefinition builders. + UserAttributeDefinition *UserAttributeDefinitionClient + // UserAttributeValue is the client for interacting with the UserAttributeValue builders. + UserAttributeValue *UserAttributeValueClient // UserSubscription is the client for interacting with the UserSubscription builders. UserSubscription *UserSubscriptionClient } @@ -78,6 +84,8 @@ func (c *Client) init() { c.UsageLog = NewUsageLogClient(c.config) c.User = NewUserClient(c.config) c.UserAllowedGroup = NewUserAllowedGroupClient(c.config) + c.UserAttributeDefinition = NewUserAttributeDefinitionClient(c.config) + c.UserAttributeValue = NewUserAttributeValueClient(c.config) c.UserSubscription = NewUserSubscriptionClient(c.config) } @@ -169,19 +177,21 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { cfg := c.config cfg.driver = tx return &Tx{ - ctx: ctx, - config: cfg, - Account: NewAccountClient(cfg), - AccountGroup: NewAccountGroupClient(cfg), - ApiKey: NewApiKeyClient(cfg), - Group: NewGroupClient(cfg), - Proxy: NewProxyClient(cfg), - RedeemCode: NewRedeemCodeClient(cfg), - Setting: NewSettingClient(cfg), - UsageLog: NewUsageLogClient(cfg), - User: NewUserClient(cfg), - UserAllowedGroup: NewUserAllowedGroupClient(cfg), - UserSubscription: NewUserSubscriptionClient(cfg), + ctx: ctx, + config: cfg, + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + ApiKey: NewApiKeyClient(cfg), + Group: NewGroupClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), }, nil } @@ -199,19 +209,21 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) cfg := c.config cfg.driver = &txDriver{tx: tx, drv: c.driver} return &Tx{ - ctx: ctx, - config: cfg, - Account: NewAccountClient(cfg), - AccountGroup: NewAccountGroupClient(cfg), - ApiKey: NewApiKeyClient(cfg), - Group: NewGroupClient(cfg), - Proxy: NewProxyClient(cfg), - RedeemCode: NewRedeemCodeClient(cfg), - Setting: NewSettingClient(cfg), - UsageLog: NewUsageLogClient(cfg), - User: NewUserClient(cfg), - UserAllowedGroup: NewUserAllowedGroupClient(cfg), - UserSubscription: NewUserSubscriptionClient(cfg), + ctx: ctx, + config: cfg, + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + ApiKey: NewApiKeyClient(cfg), + Group: NewGroupClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), }, nil } @@ -242,7 +254,8 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.Account, c.AccountGroup, c.ApiKey, c.Group, c.Proxy, c.RedeemCode, c.Setting, - c.UsageLog, c.User, c.UserAllowedGroup, c.UserSubscription, + c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, + c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) } @@ -253,7 +266,8 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.Account, c.AccountGroup, c.ApiKey, c.Group, c.Proxy, c.RedeemCode, c.Setting, - c.UsageLog, c.User, c.UserAllowedGroup, c.UserSubscription, + c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, + c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) } @@ -282,6 +296,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.User.mutate(ctx, m) case *UserAllowedGroupMutation: return c.UserAllowedGroup.mutate(ctx, m) + case *UserAttributeDefinitionMutation: + return c.UserAttributeDefinition.mutate(ctx, m) + case *UserAttributeValueMutation: + return c.UserAttributeValue.mutate(ctx, m) case *UserSubscriptionMutation: return c.UserSubscription.mutate(ctx, m) default: @@ -1916,6 +1934,22 @@ func (c *UserClient) QueryUsageLogs(_m *User) *UsageLogQuery { return query } +// QueryAttributeValues queries the attribute_values edge of a User. +func (c *UserClient) QueryAttributeValues(_m *User) *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AttributeValuesTable, user.AttributeValuesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryUserAllowedGroups queries the user_allowed_groups edge of a User. func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery { query := (&UserAllowedGroupClient{config: c.config}).Query() @@ -2075,6 +2109,322 @@ func (c *UserAllowedGroupClient) mutate(ctx context.Context, m *UserAllowedGroup } } +// UserAttributeDefinitionClient is a client for the UserAttributeDefinition schema. +type UserAttributeDefinitionClient struct { + config +} + +// NewUserAttributeDefinitionClient returns a client for the UserAttributeDefinition from the given config. +func NewUserAttributeDefinitionClient(c config) *UserAttributeDefinitionClient { + return &UserAttributeDefinitionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `userattributedefinition.Hooks(f(g(h())))`. +func (c *UserAttributeDefinitionClient) Use(hooks ...Hook) { + c.hooks.UserAttributeDefinition = append(c.hooks.UserAttributeDefinition, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userattributedefinition.Intercept(f(g(h())))`. +func (c *UserAttributeDefinitionClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAttributeDefinition = append(c.inters.UserAttributeDefinition, interceptors...) +} + +// Create returns a builder for creating a UserAttributeDefinition entity. +func (c *UserAttributeDefinitionClient) Create() *UserAttributeDefinitionCreate { + mutation := newUserAttributeDefinitionMutation(c.config, OpCreate) + return &UserAttributeDefinitionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAttributeDefinition entities. +func (c *UserAttributeDefinitionClient) CreateBulk(builders ...*UserAttributeDefinitionCreate) *UserAttributeDefinitionCreateBulk { + return &UserAttributeDefinitionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserAttributeDefinitionClient) MapCreateBulk(slice any, setFunc func(*UserAttributeDefinitionCreate, int)) *UserAttributeDefinitionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAttributeDefinitionCreateBulk{err: fmt.Errorf("calling to UserAttributeDefinitionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAttributeDefinitionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAttributeDefinitionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) Update() *UserAttributeDefinitionUpdate { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdate) + return &UserAttributeDefinitionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserAttributeDefinitionClient) UpdateOne(_m *UserAttributeDefinition) *UserAttributeDefinitionUpdateOne { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdateOne, withUserAttributeDefinition(_m)) + return &UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserAttributeDefinitionClient) UpdateOneID(id int64) *UserAttributeDefinitionUpdateOne { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdateOne, withUserAttributeDefinitionID(id)) + return &UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) Delete() *UserAttributeDefinitionDelete { + mutation := newUserAttributeDefinitionMutation(c.config, OpDelete) + return &UserAttributeDefinitionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserAttributeDefinitionClient) DeleteOne(_m *UserAttributeDefinition) *UserAttributeDefinitionDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserAttributeDefinitionClient) DeleteOneID(id int64) *UserAttributeDefinitionDeleteOne { + builder := c.Delete().Where(userattributedefinition.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserAttributeDefinitionDeleteOne{builder} +} + +// Query returns a query builder for UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) Query() *UserAttributeDefinitionQuery { + return &UserAttributeDefinitionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAttributeDefinition}, + inters: c.Interceptors(), + } +} + +// Get returns a UserAttributeDefinition entity by its id. +func (c *UserAttributeDefinitionClient) Get(ctx context.Context, id int64) (*UserAttributeDefinition, error) { + return c.Query().Where(userattributedefinition.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserAttributeDefinitionClient) GetX(ctx context.Context, id int64) *UserAttributeDefinition { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryValues queries the values edge of a UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) QueryValues(_m *UserAttributeDefinition) *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributedefinition.Table, userattributedefinition.FieldID, id), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, userattributedefinition.ValuesTable, userattributedefinition.ValuesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserAttributeDefinitionClient) Hooks() []Hook { + hooks := c.hooks.UserAttributeDefinition + return append(hooks[:len(hooks):len(hooks)], userattributedefinition.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *UserAttributeDefinitionClient) Interceptors() []Interceptor { + inters := c.inters.UserAttributeDefinition + return append(inters[:len(inters):len(inters)], userattributedefinition.Interceptors[:]...) +} + +func (c *UserAttributeDefinitionClient) mutate(ctx context.Context, m *UserAttributeDefinitionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAttributeDefinitionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAttributeDefinitionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAttributeDefinitionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAttributeDefinition mutation op: %q", m.Op()) + } +} + +// UserAttributeValueClient is a client for the UserAttributeValue schema. +type UserAttributeValueClient struct { + config +} + +// NewUserAttributeValueClient returns a client for the UserAttributeValue from the given config. +func NewUserAttributeValueClient(c config) *UserAttributeValueClient { + return &UserAttributeValueClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `userattributevalue.Hooks(f(g(h())))`. +func (c *UserAttributeValueClient) Use(hooks ...Hook) { + c.hooks.UserAttributeValue = append(c.hooks.UserAttributeValue, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userattributevalue.Intercept(f(g(h())))`. +func (c *UserAttributeValueClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAttributeValue = append(c.inters.UserAttributeValue, interceptors...) +} + +// Create returns a builder for creating a UserAttributeValue entity. +func (c *UserAttributeValueClient) Create() *UserAttributeValueCreate { + mutation := newUserAttributeValueMutation(c.config, OpCreate) + return &UserAttributeValueCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAttributeValue entities. +func (c *UserAttributeValueClient) CreateBulk(builders ...*UserAttributeValueCreate) *UserAttributeValueCreateBulk { + return &UserAttributeValueCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserAttributeValueClient) MapCreateBulk(slice any, setFunc func(*UserAttributeValueCreate, int)) *UserAttributeValueCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAttributeValueCreateBulk{err: fmt.Errorf("calling to UserAttributeValueClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAttributeValueCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAttributeValueCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAttributeValue. +func (c *UserAttributeValueClient) Update() *UserAttributeValueUpdate { + mutation := newUserAttributeValueMutation(c.config, OpUpdate) + return &UserAttributeValueUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserAttributeValueClient) UpdateOne(_m *UserAttributeValue) *UserAttributeValueUpdateOne { + mutation := newUserAttributeValueMutation(c.config, OpUpdateOne, withUserAttributeValue(_m)) + return &UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserAttributeValueClient) UpdateOneID(id int64) *UserAttributeValueUpdateOne { + mutation := newUserAttributeValueMutation(c.config, OpUpdateOne, withUserAttributeValueID(id)) + return &UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAttributeValue. +func (c *UserAttributeValueClient) Delete() *UserAttributeValueDelete { + mutation := newUserAttributeValueMutation(c.config, OpDelete) + return &UserAttributeValueDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserAttributeValueClient) DeleteOne(_m *UserAttributeValue) *UserAttributeValueDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserAttributeValueClient) DeleteOneID(id int64) *UserAttributeValueDeleteOne { + builder := c.Delete().Where(userattributevalue.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserAttributeValueDeleteOne{builder} +} + +// Query returns a query builder for UserAttributeValue. +func (c *UserAttributeValueClient) Query() *UserAttributeValueQuery { + return &UserAttributeValueQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAttributeValue}, + inters: c.Interceptors(), + } +} + +// Get returns a UserAttributeValue entity by its id. +func (c *UserAttributeValueClient) Get(ctx context.Context, id int64) (*UserAttributeValue, error) { + return c.Query().Where(userattributevalue.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserAttributeValueClient) GetX(ctx context.Context, id int64) *UserAttributeValue { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a UserAttributeValue. +func (c *UserAttributeValueClient) QueryUser(_m *UserAttributeValue) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.UserTable, userattributevalue.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDefinition queries the definition edge of a UserAttributeValue. +func (c *UserAttributeValueClient) QueryDefinition(_m *UserAttributeValue) *UserAttributeDefinitionQuery { + query := (&UserAttributeDefinitionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, id), + sqlgraph.To(userattributedefinition.Table, userattributedefinition.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.DefinitionTable, userattributevalue.DefinitionColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserAttributeValueClient) Hooks() []Hook { + return c.hooks.UserAttributeValue +} + +// Interceptors returns the client interceptors. +func (c *UserAttributeValueClient) Interceptors() []Interceptor { + return c.inters.UserAttributeValue +} + +func (c *UserAttributeValueClient) mutate(ctx context.Context, m *UserAttributeValueMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAttributeValueCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAttributeValueUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAttributeValueDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAttributeValue mutation op: %q", m.Op()) + } +} + // UserSubscriptionClient is a client for the UserSubscription schema. type UserSubscriptionClient struct { config @@ -2278,11 +2628,13 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { Account, AccountGroup, ApiKey, Group, Proxy, RedeemCode, Setting, UsageLog, - User, UserAllowedGroup, UserSubscription []ent.Hook + User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { Account, AccountGroup, ApiKey, Group, Proxy, RedeemCode, Setting, UsageLog, - User, UserAllowedGroup, UserSubscription []ent.Interceptor + User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 29890206..49437ad7 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -22,6 +22,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" ) @@ -83,17 +85,19 @@ var ( func checkColumn(t, c string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ - account.Table: account.ValidColumn, - accountgroup.Table: accountgroup.ValidColumn, - apikey.Table: apikey.ValidColumn, - group.Table: group.ValidColumn, - proxy.Table: proxy.ValidColumn, - redeemcode.Table: redeemcode.ValidColumn, - setting.Table: setting.ValidColumn, - usagelog.Table: usagelog.ValidColumn, - user.Table: user.ValidColumn, - userallowedgroup.Table: userallowedgroup.ValidColumn, - usersubscription.Table: usersubscription.ValidColumn, + account.Table: account.ValidColumn, + accountgroup.Table: accountgroup.ValidColumn, + apikey.Table: apikey.ValidColumn, + group.Table: group.ValidColumn, + proxy.Table: proxy.ValidColumn, + redeemcode.Table: redeemcode.ValidColumn, + setting.Table: setting.ValidColumn, + usagelog.Table: usagelog.ValidColumn, + user.Table: user.ValidColumn, + userallowedgroup.Table: userallowedgroup.ValidColumn, + userattributedefinition.Table: userattributedefinition.ValidColumn, + userattributevalue.Table: userattributevalue.ValidColumn, + usersubscription.Table: usersubscription.ValidColumn, }) }) return columnCheck(t, c) diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 33955cbb..3aa5d186 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -129,6 +129,30 @@ func (f UserAllowedGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.V return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAllowedGroupMutation", m) } +// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary +// function as UserAttributeDefinition mutator. +type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserAttributeDefinitionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserAttributeDefinitionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAttributeDefinitionMutation", m) +} + +// The UserAttributeValueFunc type is an adapter to allow the use of ordinary +// function as UserAttributeValue mutator. +type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserAttributeValueFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserAttributeValueMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAttributeValueMutation", m) +} + // The UserSubscriptionFunc type is an adapter to allow the use of ordinary // function as UserSubscription mutator. type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 9815f477..9f694d67 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -19,6 +19,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" ) @@ -348,6 +350,60 @@ func (f TraverseUserAllowedGroup) Traverse(ctx context.Context, q ent.Query) err return fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q) } +// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAttributeDefinitionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q) +} + +// The TraverseUserAttributeDefinition type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAttributeDefinition func(context.Context, *ent.UserAttributeDefinitionQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAttributeDefinition) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserAttributeDefinition) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q) +} + +// The UserAttributeValueFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAttributeValueFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAttributeValueQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q) +} + +// The TraverseUserAttributeValue type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAttributeValue func(context.Context, *ent.UserAttributeValueQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAttributeValue) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserAttributeValue) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAttributeValueQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q) +} + // The UserSubscriptionFunc type is an adapter to allow the use of ordinary function as a Querier. type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionQuery) (ent.Value, error) @@ -398,6 +454,10 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.UserQuery, predicate.User, user.OrderOption]{typ: ent.TypeUser, tq: q}, nil case *ent.UserAllowedGroupQuery: return &query[*ent.UserAllowedGroupQuery, predicate.UserAllowedGroup, userallowedgroup.OrderOption]{typ: ent.TypeUserAllowedGroup, tq: q}, nil + case *ent.UserAttributeDefinitionQuery: + return &query[*ent.UserAttributeDefinitionQuery, predicate.UserAttributeDefinition, userattributedefinition.OrderOption]{typ: ent.TypeUserAttributeDefinition, tq: q}, nil + case *ent.UserAttributeValueQuery: + return &query[*ent.UserAttributeValueQuery, predicate.UserAttributeValue, userattributevalue.OrderOption]{typ: ent.TypeUserAttributeValue, tq: q}, nil case *ent.UserSubscriptionQuery: return &query[*ent.UserSubscriptionQuery, predicate.UserSubscription, usersubscription.OrderOption]{typ: ent.TypeUserSubscription, tq: q}, nil default: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index c9a1675e..d532b34b 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -477,7 +477,6 @@ var ( {Name: "concurrency", Type: field.TypeInt, Default: 5}, {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, {Name: "username", Type: field.TypeString, Size: 100, Default: ""}, - {Name: "wechat", Type: field.TypeString, Size: 100, Default: ""}, {Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, } // UsersTable holds the schema information for the "users" table. @@ -531,6 +530,92 @@ var ( }, }, } + // UserAttributeDefinitionsColumns holds the columns for the "user_attribute_definitions" table. + UserAttributeDefinitionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "key", Type: field.TypeString, Size: 100}, + {Name: "name", Type: field.TypeString, Size: 255}, + {Name: "description", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + {Name: "type", Type: field.TypeString, Size: 20}, + {Name: "options", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "required", Type: field.TypeBool, Default: false}, + {Name: "validation", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "placeholder", Type: field.TypeString, Size: 255, Default: ""}, + {Name: "display_order", Type: field.TypeInt, Default: 0}, + {Name: "enabled", Type: field.TypeBool, Default: true}, + } + // UserAttributeDefinitionsTable holds the schema information for the "user_attribute_definitions" table. + UserAttributeDefinitionsTable = &schema.Table{ + Name: "user_attribute_definitions", + Columns: UserAttributeDefinitionsColumns, + PrimaryKey: []*schema.Column{UserAttributeDefinitionsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "userattributedefinition_key", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[4]}, + }, + { + Name: "userattributedefinition_enabled", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[13]}, + }, + { + Name: "userattributedefinition_display_order", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[12]}, + }, + { + Name: "userattributedefinition_deleted_at", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[3]}, + }, + }, + } + // UserAttributeValuesColumns holds the columns for the "user_attribute_values" table. + UserAttributeValuesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "value", Type: field.TypeString, Size: 2147483647, Default: ""}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "attribute_id", Type: field.TypeInt64}, + } + // UserAttributeValuesTable holds the schema information for the "user_attribute_values" table. + UserAttributeValuesTable = &schema.Table{ + Name: "user_attribute_values", + Columns: UserAttributeValuesColumns, + PrimaryKey: []*schema.Column{UserAttributeValuesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_attribute_values_users_attribute_values", + Columns: []*schema.Column{UserAttributeValuesColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_attribute_values_user_attribute_definitions_values", + Columns: []*schema.Column{UserAttributeValuesColumns[5]}, + RefColumns: []*schema.Column{UserAttributeDefinitionsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "userattributevalue_user_id_attribute_id", + Unique: true, + Columns: []*schema.Column{UserAttributeValuesColumns[4], UserAttributeValuesColumns[5]}, + }, + { + Name: "userattributevalue_attribute_id", + Unique: false, + Columns: []*schema.Column{UserAttributeValuesColumns[5]}, + }, + }, + } // UserSubscriptionsColumns holds the columns for the "user_subscriptions" table. UserSubscriptionsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -627,6 +712,8 @@ var ( UsageLogsTable, UsersTable, UserAllowedGroupsTable, + UserAttributeDefinitionsTable, + UserAttributeValuesTable, UserSubscriptionsTable, } ) @@ -676,6 +763,14 @@ func init() { UserAllowedGroupsTable.Annotation = &entsql.Annotation{ Table: "user_allowed_groups", } + UserAttributeDefinitionsTable.Annotation = &entsql.Annotation{ + Table: "user_attribute_definitions", + } + UserAttributeValuesTable.ForeignKeys[0].RefTable = UsersTable + UserAttributeValuesTable.ForeignKeys[1].RefTable = UserAttributeDefinitionsTable + UserAttributeValuesTable.Annotation = &entsql.Annotation{ + Table: "user_attribute_values", + } UserSubscriptionsTable.ForeignKeys[0].RefTable = GroupsTable UserSubscriptionsTable.ForeignKeys[1].RefTable = UsersTable UserSubscriptionsTable.ForeignKeys[2].RefTable = UsersTable diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 9e4359ab..7d5fd2ad 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -22,6 +22,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" ) @@ -34,17 +36,19 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. - TypeAccount = "Account" - TypeAccountGroup = "AccountGroup" - TypeApiKey = "ApiKey" - TypeGroup = "Group" - TypeProxy = "Proxy" - TypeRedeemCode = "RedeemCode" - TypeSetting = "Setting" - TypeUsageLog = "UsageLog" - TypeUser = "User" - TypeUserAllowedGroup = "UserAllowedGroup" - TypeUserSubscription = "UserSubscription" + TypeAccount = "Account" + TypeAccountGroup = "AccountGroup" + TypeApiKey = "ApiKey" + TypeGroup = "Group" + TypeProxy = "Proxy" + TypeRedeemCode = "RedeemCode" + TypeSetting = "Setting" + TypeUsageLog = "UsageLog" + TypeUser = "User" + TypeUserAllowedGroup = "UserAllowedGroup" + TypeUserAttributeDefinition = "UserAttributeDefinition" + TypeUserAttributeValue = "UserAttributeValue" + TypeUserSubscription = "UserSubscription" ) // AccountMutation represents an operation that mutates the Account nodes in the graph. @@ -10158,7 +10162,6 @@ type UserMutation struct { addconcurrency *int status *string username *string - wechat *string notes *string clearedFields map[string]struct{} api_keys map[int64]struct{} @@ -10179,6 +10182,9 @@ type UserMutation struct { usage_logs map[int64]struct{} removedusage_logs map[int64]struct{} clearedusage_logs bool + attribute_values map[int64]struct{} + removedattribute_values map[int64]struct{} + clearedattribute_values bool done bool oldValue func(context.Context) (*User, error) predicates []predicate.User @@ -10695,42 +10701,6 @@ func (m *UserMutation) ResetUsername() { m.username = nil } -// SetWechat sets the "wechat" field. -func (m *UserMutation) SetWechat(s string) { - m.wechat = &s -} - -// Wechat returns the value of the "wechat" field in the mutation. -func (m *UserMutation) Wechat() (r string, exists bool) { - v := m.wechat - if v == nil { - return - } - return *v, true -} - -// OldWechat returns the old "wechat" field's value of the User entity. -// If the User object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *UserMutation) OldWechat(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldWechat is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldWechat requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldWechat: %w", err) - } - return oldValue.Wechat, nil -} - -// ResetWechat resets all changes to the "wechat" field. -func (m *UserMutation) ResetWechat() { - m.wechat = nil -} - // SetNotes sets the "notes" field. func (m *UserMutation) SetNotes(s string) { m.notes = &s @@ -11091,6 +11061,60 @@ func (m *UserMutation) ResetUsageLogs() { m.removedusage_logs = nil } +// AddAttributeValueIDs adds the "attribute_values" edge to the UserAttributeValue entity by ids. +func (m *UserMutation) AddAttributeValueIDs(ids ...int64) { + if m.attribute_values == nil { + m.attribute_values = make(map[int64]struct{}) + } + for i := range ids { + m.attribute_values[ids[i]] = struct{}{} + } +} + +// ClearAttributeValues clears the "attribute_values" edge to the UserAttributeValue entity. +func (m *UserMutation) ClearAttributeValues() { + m.clearedattribute_values = true +} + +// AttributeValuesCleared reports if the "attribute_values" edge to the UserAttributeValue entity was cleared. +func (m *UserMutation) AttributeValuesCleared() bool { + return m.clearedattribute_values +} + +// RemoveAttributeValueIDs removes the "attribute_values" edge to the UserAttributeValue entity by IDs. +func (m *UserMutation) RemoveAttributeValueIDs(ids ...int64) { + if m.removedattribute_values == nil { + m.removedattribute_values = make(map[int64]struct{}) + } + for i := range ids { + delete(m.attribute_values, ids[i]) + m.removedattribute_values[ids[i]] = struct{}{} + } +} + +// RemovedAttributeValues returns the removed IDs of the "attribute_values" edge to the UserAttributeValue entity. +func (m *UserMutation) RemovedAttributeValuesIDs() (ids []int64) { + for id := range m.removedattribute_values { + ids = append(ids, id) + } + return +} + +// AttributeValuesIDs returns the "attribute_values" edge IDs in the mutation. +func (m *UserMutation) AttributeValuesIDs() (ids []int64) { + for id := range m.attribute_values { + ids = append(ids, id) + } + return +} + +// ResetAttributeValues resets all changes to the "attribute_values" edge. +func (m *UserMutation) ResetAttributeValues() { + m.attribute_values = nil + m.clearedattribute_values = false + m.removedattribute_values = nil +} + // Where appends a list predicates to the UserMutation builder. func (m *UserMutation) Where(ps ...predicate.User) { m.predicates = append(m.predicates, ps...) @@ -11125,7 +11149,7 @@ func (m *UserMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *UserMutation) Fields() []string { - fields := make([]string, 0, 12) + fields := make([]string, 0, 11) if m.created_at != nil { fields = append(fields, user.FieldCreatedAt) } @@ -11156,9 +11180,6 @@ func (m *UserMutation) Fields() []string { if m.username != nil { fields = append(fields, user.FieldUsername) } - if m.wechat != nil { - fields = append(fields, user.FieldWechat) - } if m.notes != nil { fields = append(fields, user.FieldNotes) } @@ -11190,8 +11211,6 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) { return m.Status() case user.FieldUsername: return m.Username() - case user.FieldWechat: - return m.Wechat() case user.FieldNotes: return m.Notes() } @@ -11223,8 +11242,6 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldStatus(ctx) case user.FieldUsername: return m.OldUsername(ctx) - case user.FieldWechat: - return m.OldWechat(ctx) case user.FieldNotes: return m.OldNotes(ctx) } @@ -11306,13 +11323,6 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetUsername(v) return nil - case user.FieldWechat: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetWechat(v) - return nil case user.FieldNotes: v, ok := value.(string) if !ok { @@ -11435,9 +11445,6 @@ func (m *UserMutation) ResetField(name string) error { case user.FieldUsername: m.ResetUsername() return nil - case user.FieldWechat: - m.ResetWechat() - return nil case user.FieldNotes: m.ResetNotes() return nil @@ -11447,7 +11454,7 @@ func (m *UserMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *UserMutation) AddedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.api_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -11466,6 +11473,9 @@ func (m *UserMutation) AddedEdges() []string { if m.usage_logs != nil { edges = append(edges, user.EdgeUsageLogs) } + if m.attribute_values != nil { + edges = append(edges, user.EdgeAttributeValues) + } return edges } @@ -11509,13 +11519,19 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAttributeValues: + ids := make([]ent.Value, 0, len(m.attribute_values)) + for id := range m.attribute_values { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. func (m *UserMutation) RemovedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.removedapi_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -11534,6 +11550,9 @@ func (m *UserMutation) RemovedEdges() []string { if m.removedusage_logs != nil { edges = append(edges, user.EdgeUsageLogs) } + if m.removedattribute_values != nil { + edges = append(edges, user.EdgeAttributeValues) + } return edges } @@ -11577,13 +11596,19 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeAttributeValues: + ids := make([]ent.Value, 0, len(m.removedattribute_values)) + for id := range m.removedattribute_values { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *UserMutation) ClearedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.clearedapi_keys { edges = append(edges, user.EdgeAPIKeys) } @@ -11602,6 +11627,9 @@ func (m *UserMutation) ClearedEdges() []string { if m.clearedusage_logs { edges = append(edges, user.EdgeUsageLogs) } + if m.clearedattribute_values { + edges = append(edges, user.EdgeAttributeValues) + } return edges } @@ -11621,6 +11649,8 @@ func (m *UserMutation) EdgeCleared(name string) bool { return m.clearedallowed_groups case user.EdgeUsageLogs: return m.clearedusage_logs + case user.EdgeAttributeValues: + return m.clearedattribute_values } return false } @@ -11655,6 +11685,9 @@ func (m *UserMutation) ResetEdge(name string) error { case user.EdgeUsageLogs: m.ResetUsageLogs() return nil + case user.EdgeAttributeValues: + m.ResetAttributeValues() + return nil } return fmt.Errorf("unknown User edge %s", name) } @@ -12076,6 +12109,1805 @@ func (m *UserAllowedGroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown UserAllowedGroup edge %s", name) } +// UserAttributeDefinitionMutation represents an operation that mutates the UserAttributeDefinition nodes in the graph. +type UserAttributeDefinitionMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + key *string + name *string + description *string + _type *string + options *[]map[string]interface{} + appendoptions []map[string]interface{} + required *bool + validation *map[string]interface{} + placeholder *string + display_order *int + adddisplay_order *int + enabled *bool + clearedFields map[string]struct{} + values map[int64]struct{} + removedvalues map[int64]struct{} + clearedvalues bool + done bool + oldValue func(context.Context) (*UserAttributeDefinition, error) + predicates []predicate.UserAttributeDefinition +} + +var _ ent.Mutation = (*UserAttributeDefinitionMutation)(nil) + +// userattributedefinitionOption allows management of the mutation configuration using functional options. +type userattributedefinitionOption func(*UserAttributeDefinitionMutation) + +// newUserAttributeDefinitionMutation creates new mutation for the UserAttributeDefinition entity. +func newUserAttributeDefinitionMutation(c config, op Op, opts ...userattributedefinitionOption) *UserAttributeDefinitionMutation { + m := &UserAttributeDefinitionMutation{ + config: c, + op: op, + typ: TypeUserAttributeDefinition, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserAttributeDefinitionID sets the ID field of the mutation. +func withUserAttributeDefinitionID(id int64) userattributedefinitionOption { + return func(m *UserAttributeDefinitionMutation) { + var ( + err error + once sync.Once + value *UserAttributeDefinition + ) + m.oldValue = func(ctx context.Context) (*UserAttributeDefinition, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UserAttributeDefinition.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUserAttributeDefinition sets the old UserAttributeDefinition of the mutation. +func withUserAttributeDefinition(node *UserAttributeDefinition) userattributedefinitionOption { + return func(m *UserAttributeDefinitionMutation) { + m.oldValue = func(context.Context) (*UserAttributeDefinition, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserAttributeDefinitionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserAttributeDefinitionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserAttributeDefinitionMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UserAttributeDefinitionMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().UserAttributeDefinition.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserAttributeDefinitionMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserAttributeDefinitionMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserAttributeDefinitionMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UserAttributeDefinitionMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserAttributeDefinitionMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserAttributeDefinitionMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *UserAttributeDefinitionMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *UserAttributeDefinitionMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *UserAttributeDefinitionMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[userattributedefinition.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *UserAttributeDefinitionMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[userattributedefinition.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *UserAttributeDefinitionMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, userattributedefinition.FieldDeletedAt) +} + +// SetKey sets the "key" field. +func (m *UserAttributeDefinitionMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the value of the "key" field in the mutation. +func (m *UserAttributeDefinitionMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old "key" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey resets all changes to the "key" field. +func (m *UserAttributeDefinitionMutation) ResetKey() { + m.key = nil +} + +// SetName sets the "name" field. +func (m *UserAttributeDefinitionMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *UserAttributeDefinitionMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *UserAttributeDefinitionMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *UserAttributeDefinitionMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *UserAttributeDefinitionMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ResetDescription resets all changes to the "description" field. +func (m *UserAttributeDefinitionMutation) ResetDescription() { + m.description = nil +} + +// SetType sets the "type" field. +func (m *UserAttributeDefinitionMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *UserAttributeDefinitionMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *UserAttributeDefinitionMutation) ResetType() { + m._type = nil +} + +// SetOptions sets the "options" field. +func (m *UserAttributeDefinitionMutation) SetOptions(value []map[string]interface{}) { + m.options = &value + m.appendoptions = nil +} + +// Options returns the value of the "options" field in the mutation. +func (m *UserAttributeDefinitionMutation) Options() (r []map[string]interface{}, exists bool) { + v := m.options + if v == nil { + return + } + return *v, true +} + +// OldOptions returns the old "options" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldOptions(ctx context.Context) (v []map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOptions is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOptions requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOptions: %w", err) + } + return oldValue.Options, nil +} + +// AppendOptions adds value to the "options" field. +func (m *UserAttributeDefinitionMutation) AppendOptions(value []map[string]interface{}) { + m.appendoptions = append(m.appendoptions, value...) +} + +// AppendedOptions returns the list of values that were appended to the "options" field in this mutation. +func (m *UserAttributeDefinitionMutation) AppendedOptions() ([]map[string]interface{}, bool) { + if len(m.appendoptions) == 0 { + return nil, false + } + return m.appendoptions, true +} + +// ResetOptions resets all changes to the "options" field. +func (m *UserAttributeDefinitionMutation) ResetOptions() { + m.options = nil + m.appendoptions = nil +} + +// SetRequired sets the "required" field. +func (m *UserAttributeDefinitionMutation) SetRequired(b bool) { + m.required = &b +} + +// Required returns the value of the "required" field in the mutation. +func (m *UserAttributeDefinitionMutation) Required() (r bool, exists bool) { + v := m.required + if v == nil { + return + } + return *v, true +} + +// OldRequired returns the old "required" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldRequired(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRequired is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRequired requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRequired: %w", err) + } + return oldValue.Required, nil +} + +// ResetRequired resets all changes to the "required" field. +func (m *UserAttributeDefinitionMutation) ResetRequired() { + m.required = nil +} + +// SetValidation sets the "validation" field. +func (m *UserAttributeDefinitionMutation) SetValidation(value map[string]interface{}) { + m.validation = &value +} + +// Validation returns the value of the "validation" field in the mutation. +func (m *UserAttributeDefinitionMutation) Validation() (r map[string]interface{}, exists bool) { + v := m.validation + if v == nil { + return + } + return *v, true +} + +// OldValidation returns the old "validation" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldValidation(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValidation is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValidation requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValidation: %w", err) + } + return oldValue.Validation, nil +} + +// ResetValidation resets all changes to the "validation" field. +func (m *UserAttributeDefinitionMutation) ResetValidation() { + m.validation = nil +} + +// SetPlaceholder sets the "placeholder" field. +func (m *UserAttributeDefinitionMutation) SetPlaceholder(s string) { + m.placeholder = &s +} + +// Placeholder returns the value of the "placeholder" field in the mutation. +func (m *UserAttributeDefinitionMutation) Placeholder() (r string, exists bool) { + v := m.placeholder + if v == nil { + return + } + return *v, true +} + +// OldPlaceholder returns the old "placeholder" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldPlaceholder(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlaceholder is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlaceholder requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlaceholder: %w", err) + } + return oldValue.Placeholder, nil +} + +// ResetPlaceholder resets all changes to the "placeholder" field. +func (m *UserAttributeDefinitionMutation) ResetPlaceholder() { + m.placeholder = nil +} + +// SetDisplayOrder sets the "display_order" field. +func (m *UserAttributeDefinitionMutation) SetDisplayOrder(i int) { + m.display_order = &i + m.adddisplay_order = nil +} + +// DisplayOrder returns the value of the "display_order" field in the mutation. +func (m *UserAttributeDefinitionMutation) DisplayOrder() (r int, exists bool) { + v := m.display_order + if v == nil { + return + } + return *v, true +} + +// OldDisplayOrder returns the old "display_order" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDisplayOrder(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDisplayOrder is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDisplayOrder requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDisplayOrder: %w", err) + } + return oldValue.DisplayOrder, nil +} + +// AddDisplayOrder adds i to the "display_order" field. +func (m *UserAttributeDefinitionMutation) AddDisplayOrder(i int) { + if m.adddisplay_order != nil { + *m.adddisplay_order += i + } else { + m.adddisplay_order = &i + } +} + +// AddedDisplayOrder returns the value that was added to the "display_order" field in this mutation. +func (m *UserAttributeDefinitionMutation) AddedDisplayOrder() (r int, exists bool) { + v := m.adddisplay_order + if v == nil { + return + } + return *v, true +} + +// ResetDisplayOrder resets all changes to the "display_order" field. +func (m *UserAttributeDefinitionMutation) ResetDisplayOrder() { + m.display_order = nil + m.adddisplay_order = nil +} + +// SetEnabled sets the "enabled" field. +func (m *UserAttributeDefinitionMutation) SetEnabled(b bool) { + m.enabled = &b +} + +// Enabled returns the value of the "enabled" field in the mutation. +func (m *UserAttributeDefinitionMutation) Enabled() (r bool, exists bool) { + v := m.enabled + if v == nil { + return + } + return *v, true +} + +// OldEnabled returns the old "enabled" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEnabled: %w", err) + } + return oldValue.Enabled, nil +} + +// ResetEnabled resets all changes to the "enabled" field. +func (m *UserAttributeDefinitionMutation) ResetEnabled() { + m.enabled = nil +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by ids. +func (m *UserAttributeDefinitionMutation) AddValueIDs(ids ...int64) { + if m.values == nil { + m.values = make(map[int64]struct{}) + } + for i := range ids { + m.values[ids[i]] = struct{}{} + } +} + +// ClearValues clears the "values" edge to the UserAttributeValue entity. +func (m *UserAttributeDefinitionMutation) ClearValues() { + m.clearedvalues = true +} + +// ValuesCleared reports if the "values" edge to the UserAttributeValue entity was cleared. +func (m *UserAttributeDefinitionMutation) ValuesCleared() bool { + return m.clearedvalues +} + +// RemoveValueIDs removes the "values" edge to the UserAttributeValue entity by IDs. +func (m *UserAttributeDefinitionMutation) RemoveValueIDs(ids ...int64) { + if m.removedvalues == nil { + m.removedvalues = make(map[int64]struct{}) + } + for i := range ids { + delete(m.values, ids[i]) + m.removedvalues[ids[i]] = struct{}{} + } +} + +// RemovedValues returns the removed IDs of the "values" edge to the UserAttributeValue entity. +func (m *UserAttributeDefinitionMutation) RemovedValuesIDs() (ids []int64) { + for id := range m.removedvalues { + ids = append(ids, id) + } + return +} + +// ValuesIDs returns the "values" edge IDs in the mutation. +func (m *UserAttributeDefinitionMutation) ValuesIDs() (ids []int64) { + for id := range m.values { + ids = append(ids, id) + } + return +} + +// ResetValues resets all changes to the "values" edge. +func (m *UserAttributeDefinitionMutation) ResetValues() { + m.values = nil + m.clearedvalues = false + m.removedvalues = nil +} + +// Where appends a list predicates to the UserAttributeDefinitionMutation builder. +func (m *UserAttributeDefinitionMutation) Where(ps ...predicate.UserAttributeDefinition) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserAttributeDefinitionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserAttributeDefinitionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UserAttributeDefinition, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserAttributeDefinitionMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserAttributeDefinitionMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UserAttributeDefinition). +func (m *UserAttributeDefinitionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserAttributeDefinitionMutation) Fields() []string { + fields := make([]string, 0, 13) + if m.created_at != nil { + fields = append(fields, userattributedefinition.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, userattributedefinition.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, userattributedefinition.FieldDeletedAt) + } + if m.key != nil { + fields = append(fields, userattributedefinition.FieldKey) + } + if m.name != nil { + fields = append(fields, userattributedefinition.FieldName) + } + if m.description != nil { + fields = append(fields, userattributedefinition.FieldDescription) + } + if m._type != nil { + fields = append(fields, userattributedefinition.FieldType) + } + if m.options != nil { + fields = append(fields, userattributedefinition.FieldOptions) + } + if m.required != nil { + fields = append(fields, userattributedefinition.FieldRequired) + } + if m.validation != nil { + fields = append(fields, userattributedefinition.FieldValidation) + } + if m.placeholder != nil { + fields = append(fields, userattributedefinition.FieldPlaceholder) + } + if m.display_order != nil { + fields = append(fields, userattributedefinition.FieldDisplayOrder) + } + if m.enabled != nil { + fields = append(fields, userattributedefinition.FieldEnabled) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserAttributeDefinitionMutation) Field(name string) (ent.Value, bool) { + switch name { + case userattributedefinition.FieldCreatedAt: + return m.CreatedAt() + case userattributedefinition.FieldUpdatedAt: + return m.UpdatedAt() + case userattributedefinition.FieldDeletedAt: + return m.DeletedAt() + case userattributedefinition.FieldKey: + return m.Key() + case userattributedefinition.FieldName: + return m.Name() + case userattributedefinition.FieldDescription: + return m.Description() + case userattributedefinition.FieldType: + return m.GetType() + case userattributedefinition.FieldOptions: + return m.Options() + case userattributedefinition.FieldRequired: + return m.Required() + case userattributedefinition.FieldValidation: + return m.Validation() + case userattributedefinition.FieldPlaceholder: + return m.Placeholder() + case userattributedefinition.FieldDisplayOrder: + return m.DisplayOrder() + case userattributedefinition.FieldEnabled: + return m.Enabled() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAttributeDefinitionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case userattributedefinition.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case userattributedefinition.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case userattributedefinition.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case userattributedefinition.FieldKey: + return m.OldKey(ctx) + case userattributedefinition.FieldName: + return m.OldName(ctx) + case userattributedefinition.FieldDescription: + return m.OldDescription(ctx) + case userattributedefinition.FieldType: + return m.OldType(ctx) + case userattributedefinition.FieldOptions: + return m.OldOptions(ctx) + case userattributedefinition.FieldRequired: + return m.OldRequired(ctx) + case userattributedefinition.FieldValidation: + return m.OldValidation(ctx) + case userattributedefinition.FieldPlaceholder: + return m.OldPlaceholder(ctx) + case userattributedefinition.FieldDisplayOrder: + return m.OldDisplayOrder(ctx) + case userattributedefinition.FieldEnabled: + return m.OldEnabled(ctx) + } + return nil, fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeDefinitionMutation) SetField(name string, value ent.Value) error { + switch name { + case userattributedefinition.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case userattributedefinition.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case userattributedefinition.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case userattributedefinition.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case userattributedefinition.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case userattributedefinition.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case userattributedefinition.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case userattributedefinition.FieldOptions: + v, ok := value.([]map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOptions(v) + return nil + case userattributedefinition.FieldRequired: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRequired(v) + return nil + case userattributedefinition.FieldValidation: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValidation(v) + return nil + case userattributedefinition.FieldPlaceholder: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlaceholder(v) + return nil + case userattributedefinition.FieldDisplayOrder: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDisplayOrder(v) + return nil + case userattributedefinition.FieldEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEnabled(v) + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAttributeDefinitionMutation) AddedFields() []string { + var fields []string + if m.adddisplay_order != nil { + fields = append(fields, userattributedefinition.FieldDisplayOrder) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAttributeDefinitionMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case userattributedefinition.FieldDisplayOrder: + return m.AddedDisplayOrder() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeDefinitionMutation) AddField(name string, value ent.Value) error { + switch name { + case userattributedefinition.FieldDisplayOrder: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDisplayOrder(v) + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserAttributeDefinitionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(userattributedefinition.FieldDeletedAt) { + fields = append(fields, userattributedefinition.FieldDeletedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAttributeDefinitionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ClearField(name string) error { + switch name { + case userattributedefinition.FieldDeletedAt: + m.ClearDeletedAt() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ResetField(name string) error { + switch name { + case userattributedefinition.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case userattributedefinition.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case userattributedefinition.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case userattributedefinition.FieldKey: + m.ResetKey() + return nil + case userattributedefinition.FieldName: + m.ResetName() + return nil + case userattributedefinition.FieldDescription: + m.ResetDescription() + return nil + case userattributedefinition.FieldType: + m.ResetType() + return nil + case userattributedefinition.FieldOptions: + m.ResetOptions() + return nil + case userattributedefinition.FieldRequired: + m.ResetRequired() + return nil + case userattributedefinition.FieldValidation: + m.ResetValidation() + return nil + case userattributedefinition.FieldPlaceholder: + m.ResetPlaceholder() + return nil + case userattributedefinition.FieldDisplayOrder: + m.ResetDisplayOrder() + return nil + case userattributedefinition.FieldEnabled: + m.ResetEnabled() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAttributeDefinitionMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.values != nil { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAttributeDefinitionMutation) AddedIDs(name string) []ent.Value { + switch name { + case userattributedefinition.EdgeValues: + ids := make([]ent.Value, 0, len(m.values)) + for id := range m.values { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserAttributeDefinitionMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedvalues != nil { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAttributeDefinitionMutation) RemovedIDs(name string) []ent.Value { + switch name { + case userattributedefinition.EdgeValues: + ids := make([]ent.Value, 0, len(m.removedvalues)) + for id := range m.removedvalues { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAttributeDefinitionMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedvalues { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAttributeDefinitionMutation) EdgeCleared(name string) bool { + switch name { + case userattributedefinition.EdgeValues: + return m.clearedvalues + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown UserAttributeDefinition unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ResetEdge(name string) error { + switch name { + case userattributedefinition.EdgeValues: + m.ResetValues() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition edge %s", name) +} + +// UserAttributeValueMutation represents an operation that mutates the UserAttributeValue nodes in the graph. +type UserAttributeValueMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + value *string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + definition *int64 + cleareddefinition bool + done bool + oldValue func(context.Context) (*UserAttributeValue, error) + predicates []predicate.UserAttributeValue +} + +var _ ent.Mutation = (*UserAttributeValueMutation)(nil) + +// userattributevalueOption allows management of the mutation configuration using functional options. +type userattributevalueOption func(*UserAttributeValueMutation) + +// newUserAttributeValueMutation creates new mutation for the UserAttributeValue entity. +func newUserAttributeValueMutation(c config, op Op, opts ...userattributevalueOption) *UserAttributeValueMutation { + m := &UserAttributeValueMutation{ + config: c, + op: op, + typ: TypeUserAttributeValue, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserAttributeValueID sets the ID field of the mutation. +func withUserAttributeValueID(id int64) userattributevalueOption { + return func(m *UserAttributeValueMutation) { + var ( + err error + once sync.Once + value *UserAttributeValue + ) + m.oldValue = func(ctx context.Context) (*UserAttributeValue, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UserAttributeValue.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUserAttributeValue sets the old UserAttributeValue of the mutation. +func withUserAttributeValue(node *UserAttributeValue) userattributevalueOption { + return func(m *UserAttributeValueMutation) { + m.oldValue = func(context.Context) (*UserAttributeValue, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserAttributeValueMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserAttributeValueMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserAttributeValueMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UserAttributeValueMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().UserAttributeValue.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserAttributeValueMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserAttributeValueMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserAttributeValueMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UserAttributeValueMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserAttributeValueMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserAttributeValueMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetUserID sets the "user_id" field. +func (m *UserAttributeValueMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserAttributeValueMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserAttributeValueMutation) ResetUserID() { + m.user = nil +} + +// SetAttributeID sets the "attribute_id" field. +func (m *UserAttributeValueMutation) SetAttributeID(i int64) { + m.definition = &i +} + +// AttributeID returns the value of the "attribute_id" field in the mutation. +func (m *UserAttributeValueMutation) AttributeID() (r int64, exists bool) { + v := m.definition + if v == nil { + return + } + return *v, true +} + +// OldAttributeID returns the old "attribute_id" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldAttributeID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAttributeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAttributeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAttributeID: %w", err) + } + return oldValue.AttributeID, nil +} + +// ResetAttributeID resets all changes to the "attribute_id" field. +func (m *UserAttributeValueMutation) ResetAttributeID() { + m.definition = nil +} + +// SetValue sets the "value" field. +func (m *UserAttributeValueMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *UserAttributeValueMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *UserAttributeValueMutation) ResetValue() { + m.value = nil +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UserAttributeValueMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[userattributevalue.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *UserAttributeValueMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UserAttributeValueMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UserAttributeValueMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by id. +func (m *UserAttributeValueMutation) SetDefinitionID(id int64) { + m.definition = &id +} + +// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity. +func (m *UserAttributeValueMutation) ClearDefinition() { + m.cleareddefinition = true + m.clearedFields[userattributevalue.FieldAttributeID] = struct{}{} +} + +// DefinitionCleared reports if the "definition" edge to the UserAttributeDefinition entity was cleared. +func (m *UserAttributeValueMutation) DefinitionCleared() bool { + return m.cleareddefinition +} + +// DefinitionID returns the "definition" edge ID in the mutation. +func (m *UserAttributeValueMutation) DefinitionID() (id int64, exists bool) { + if m.definition != nil { + return *m.definition, true + } + return +} + +// DefinitionIDs returns the "definition" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// DefinitionID instead. It exists only for internal usage by the builders. +func (m *UserAttributeValueMutation) DefinitionIDs() (ids []int64) { + if id := m.definition; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetDefinition resets all changes to the "definition" edge. +func (m *UserAttributeValueMutation) ResetDefinition() { + m.definition = nil + m.cleareddefinition = false +} + +// Where appends a list predicates to the UserAttributeValueMutation builder. +func (m *UserAttributeValueMutation) Where(ps ...predicate.UserAttributeValue) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserAttributeValueMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserAttributeValueMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UserAttributeValue, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserAttributeValueMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserAttributeValueMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UserAttributeValue). +func (m *UserAttributeValueMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserAttributeValueMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.created_at != nil { + fields = append(fields, userattributevalue.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, userattributevalue.FieldUpdatedAt) + } + if m.user != nil { + fields = append(fields, userattributevalue.FieldUserID) + } + if m.definition != nil { + fields = append(fields, userattributevalue.FieldAttributeID) + } + if m.value != nil { + fields = append(fields, userattributevalue.FieldValue) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserAttributeValueMutation) Field(name string) (ent.Value, bool) { + switch name { + case userattributevalue.FieldCreatedAt: + return m.CreatedAt() + case userattributevalue.FieldUpdatedAt: + return m.UpdatedAt() + case userattributevalue.FieldUserID: + return m.UserID() + case userattributevalue.FieldAttributeID: + return m.AttributeID() + case userattributevalue.FieldValue: + return m.Value() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAttributeValueMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case userattributevalue.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case userattributevalue.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case userattributevalue.FieldUserID: + return m.OldUserID(ctx) + case userattributevalue.FieldAttributeID: + return m.OldAttributeID(ctx) + case userattributevalue.FieldValue: + return m.OldValue(ctx) + } + return nil, fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeValueMutation) SetField(name string, value ent.Value) error { + switch name { + case userattributevalue.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case userattributevalue.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case userattributevalue.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case userattributevalue.FieldAttributeID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAttributeID(v) + return nil + case userattributevalue.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + } + return fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAttributeValueMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAttributeValueMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeValueMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown UserAttributeValue numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserAttributeValueMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAttributeValueMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserAttributeValueMutation) ClearField(name string) error { + return fmt.Errorf("unknown UserAttributeValue nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAttributeValueMutation) ResetField(name string) error { + switch name { + case userattributevalue.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case userattributevalue.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case userattributevalue.FieldUserID: + m.ResetUserID() + return nil + case userattributevalue.FieldAttributeID: + m.ResetAttributeID() + return nil + case userattributevalue.FieldValue: + m.ResetValue() + return nil + } + return fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAttributeValueMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, userattributevalue.EdgeUser) + } + if m.definition != nil { + edges = append(edges, userattributevalue.EdgeDefinition) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAttributeValueMutation) AddedIDs(name string) []ent.Value { + switch name { + case userattributevalue.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case userattributevalue.EdgeDefinition: + if id := m.definition; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserAttributeValueMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAttributeValueMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAttributeValueMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, userattributevalue.EdgeUser) + } + if m.cleareddefinition { + edges = append(edges, userattributevalue.EdgeDefinition) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAttributeValueMutation) EdgeCleared(name string) bool { + switch name { + case userattributevalue.EdgeUser: + return m.cleareduser + case userattributevalue.EdgeDefinition: + return m.cleareddefinition + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAttributeValueMutation) ClearEdge(name string) error { + switch name { + case userattributevalue.EdgeUser: + m.ClearUser() + return nil + case userattributevalue.EdgeDefinition: + m.ClearDefinition() + return nil + } + return fmt.Errorf("unknown UserAttributeValue unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserAttributeValueMutation) ResetEdge(name string) error { + switch name { + case userattributevalue.EdgeUser: + m.ResetUser() + return nil + case userattributevalue.EdgeDefinition: + m.ResetDefinition() + return nil + } + return fmt.Errorf("unknown UserAttributeValue edge %s", name) +} + // UserSubscriptionMutation represents an operation that mutates the UserSubscription nodes in the graph. type UserSubscriptionMutation struct { config diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index f6bdf466..ae1bf007 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -36,5 +36,11 @@ type User func(*sql.Selector) // UserAllowedGroup is the predicate function for userallowedgroup builders. type UserAllowedGroup func(*sql.Selector) +// UserAttributeDefinition is the predicate function for userattributedefinition builders. +type UserAttributeDefinition func(*sql.Selector) + +// UserAttributeValue is the predicate function for userattributevalue builders. +type UserAttributeValue func(*sql.Selector) + // UserSubscription is the predicate function for usersubscription builders. type UserSubscription func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 0b254b3e..12c3e7e3 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -16,6 +16,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" "github.com/Wei-Shaw/sub2api/ent/usersubscription" ) @@ -604,14 +606,8 @@ func init() { user.DefaultUsername = userDescUsername.Default.(string) // user.UsernameValidator is a validator for the "username" field. It is called by the builders before save. user.UsernameValidator = userDescUsername.Validators[0].(func(string) error) - // userDescWechat is the schema descriptor for wechat field. - userDescWechat := userFields[7].Descriptor() - // user.DefaultWechat holds the default value on creation for the wechat field. - user.DefaultWechat = userDescWechat.Default.(string) - // user.WechatValidator is a validator for the "wechat" field. It is called by the builders before save. - user.WechatValidator = userDescWechat.Validators[0].(func(string) error) // userDescNotes is the schema descriptor for notes field. - userDescNotes := userFields[8].Descriptor() + userDescNotes := userFields[7].Descriptor() // user.DefaultNotes holds the default value on creation for the notes field. user.DefaultNotes = userDescNotes.Default.(string) userallowedgroupFields := schema.UserAllowedGroup{}.Fields() @@ -620,6 +616,128 @@ func init() { userallowedgroupDescCreatedAt := userallowedgroupFields[2].Descriptor() // userallowedgroup.DefaultCreatedAt holds the default value on creation for the created_at field. userallowedgroup.DefaultCreatedAt = userallowedgroupDescCreatedAt.Default.(func() time.Time) + userattributedefinitionMixin := schema.UserAttributeDefinition{}.Mixin() + userattributedefinitionMixinHooks1 := userattributedefinitionMixin[1].Hooks() + userattributedefinition.Hooks[0] = userattributedefinitionMixinHooks1[0] + userattributedefinitionMixinInters1 := userattributedefinitionMixin[1].Interceptors() + userattributedefinition.Interceptors[0] = userattributedefinitionMixinInters1[0] + userattributedefinitionMixinFields0 := userattributedefinitionMixin[0].Fields() + _ = userattributedefinitionMixinFields0 + userattributedefinitionFields := schema.UserAttributeDefinition{}.Fields() + _ = userattributedefinitionFields + // userattributedefinitionDescCreatedAt is the schema descriptor for created_at field. + userattributedefinitionDescCreatedAt := userattributedefinitionMixinFields0[0].Descriptor() + // userattributedefinition.DefaultCreatedAt holds the default value on creation for the created_at field. + userattributedefinition.DefaultCreatedAt = userattributedefinitionDescCreatedAt.Default.(func() time.Time) + // userattributedefinitionDescUpdatedAt is the schema descriptor for updated_at field. + userattributedefinitionDescUpdatedAt := userattributedefinitionMixinFields0[1].Descriptor() + // userattributedefinition.DefaultUpdatedAt holds the default value on creation for the updated_at field. + userattributedefinition.DefaultUpdatedAt = userattributedefinitionDescUpdatedAt.Default.(func() time.Time) + // userattributedefinition.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + userattributedefinition.UpdateDefaultUpdatedAt = userattributedefinitionDescUpdatedAt.UpdateDefault.(func() time.Time) + // userattributedefinitionDescKey is the schema descriptor for key field. + userattributedefinitionDescKey := userattributedefinitionFields[0].Descriptor() + // userattributedefinition.KeyValidator is a validator for the "key" field. It is called by the builders before save. + userattributedefinition.KeyValidator = func() func(string) error { + validators := userattributedefinitionDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescName is the schema descriptor for name field. + userattributedefinitionDescName := userattributedefinitionFields[1].Descriptor() + // userattributedefinition.NameValidator is a validator for the "name" field. It is called by the builders before save. + userattributedefinition.NameValidator = func() func(string) error { + validators := userattributedefinitionDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescDescription is the schema descriptor for description field. + userattributedefinitionDescDescription := userattributedefinitionFields[2].Descriptor() + // userattributedefinition.DefaultDescription holds the default value on creation for the description field. + userattributedefinition.DefaultDescription = userattributedefinitionDescDescription.Default.(string) + // userattributedefinitionDescType is the schema descriptor for type field. + userattributedefinitionDescType := userattributedefinitionFields[3].Descriptor() + // userattributedefinition.TypeValidator is a validator for the "type" field. It is called by the builders before save. + userattributedefinition.TypeValidator = func() func(string) error { + validators := userattributedefinitionDescType.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_type string) error { + for _, fn := range fns { + if err := fn(_type); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescOptions is the schema descriptor for options field. + userattributedefinitionDescOptions := userattributedefinitionFields[4].Descriptor() + // userattributedefinition.DefaultOptions holds the default value on creation for the options field. + userattributedefinition.DefaultOptions = userattributedefinitionDescOptions.Default.([]map[string]interface{}) + // userattributedefinitionDescRequired is the schema descriptor for required field. + userattributedefinitionDescRequired := userattributedefinitionFields[5].Descriptor() + // userattributedefinition.DefaultRequired holds the default value on creation for the required field. + userattributedefinition.DefaultRequired = userattributedefinitionDescRequired.Default.(bool) + // userattributedefinitionDescValidation is the schema descriptor for validation field. + userattributedefinitionDescValidation := userattributedefinitionFields[6].Descriptor() + // userattributedefinition.DefaultValidation holds the default value on creation for the validation field. + userattributedefinition.DefaultValidation = userattributedefinitionDescValidation.Default.(map[string]interface{}) + // userattributedefinitionDescPlaceholder is the schema descriptor for placeholder field. + userattributedefinitionDescPlaceholder := userattributedefinitionFields[7].Descriptor() + // userattributedefinition.DefaultPlaceholder holds the default value on creation for the placeholder field. + userattributedefinition.DefaultPlaceholder = userattributedefinitionDescPlaceholder.Default.(string) + // userattributedefinition.PlaceholderValidator is a validator for the "placeholder" field. It is called by the builders before save. + userattributedefinition.PlaceholderValidator = userattributedefinitionDescPlaceholder.Validators[0].(func(string) error) + // userattributedefinitionDescDisplayOrder is the schema descriptor for display_order field. + userattributedefinitionDescDisplayOrder := userattributedefinitionFields[8].Descriptor() + // userattributedefinition.DefaultDisplayOrder holds the default value on creation for the display_order field. + userattributedefinition.DefaultDisplayOrder = userattributedefinitionDescDisplayOrder.Default.(int) + // userattributedefinitionDescEnabled is the schema descriptor for enabled field. + userattributedefinitionDescEnabled := userattributedefinitionFields[9].Descriptor() + // userattributedefinition.DefaultEnabled holds the default value on creation for the enabled field. + userattributedefinition.DefaultEnabled = userattributedefinitionDescEnabled.Default.(bool) + userattributevalueMixin := schema.UserAttributeValue{}.Mixin() + userattributevalueMixinFields0 := userattributevalueMixin[0].Fields() + _ = userattributevalueMixinFields0 + userattributevalueFields := schema.UserAttributeValue{}.Fields() + _ = userattributevalueFields + // userattributevalueDescCreatedAt is the schema descriptor for created_at field. + userattributevalueDescCreatedAt := userattributevalueMixinFields0[0].Descriptor() + // userattributevalue.DefaultCreatedAt holds the default value on creation for the created_at field. + userattributevalue.DefaultCreatedAt = userattributevalueDescCreatedAt.Default.(func() time.Time) + // userattributevalueDescUpdatedAt is the schema descriptor for updated_at field. + userattributevalueDescUpdatedAt := userattributevalueMixinFields0[1].Descriptor() + // userattributevalue.DefaultUpdatedAt holds the default value on creation for the updated_at field. + userattributevalue.DefaultUpdatedAt = userattributevalueDescUpdatedAt.Default.(func() time.Time) + // userattributevalue.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + userattributevalue.UpdateDefaultUpdatedAt = userattributevalueDescUpdatedAt.UpdateDefault.(func() time.Time) + // userattributevalueDescValue is the schema descriptor for value field. + userattributevalueDescValue := userattributevalueFields[2].Descriptor() + // userattributevalue.DefaultValue holds the default value on creation for the value field. + userattributevalue.DefaultValue = userattributevalueDescValue.Default.(string) usersubscriptionMixin := schema.UserSubscription{}.Mixin() usersubscriptionMixinHooks1 := usersubscriptionMixin[1].Hooks() usersubscription.Hooks[0] = usersubscriptionMixinHooks1[0] diff --git a/backend/ent/schema/user_attribute_definition.go b/backend/ent/schema/user_attribute_definition.go new file mode 100644 index 00000000..eb54171a --- /dev/null +++ b/backend/ent/schema/user_attribute_definition.go @@ -0,0 +1,109 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserAttributeDefinition holds the schema definition for custom user attributes. +// +// This entity defines the metadata for user attributes, such as: +// - Attribute key (unique identifier like "company_name") +// - Display name shown in forms +// - Field type (text, number, select, etc.) +// - Validation rules +// - Whether the field is required or enabled +type UserAttributeDefinition struct { + ent.Schema +} + +func (UserAttributeDefinition) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_attribute_definitions"}, + } +} + +func (UserAttributeDefinition) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (UserAttributeDefinition) Fields() []ent.Field { + return []ent.Field{ + // key: Unique identifier for the attribute (e.g., "company_name") + // Used for programmatic reference + field.String("key"). + MaxLen(100). + NotEmpty(), + + // name: Display name shown in forms (e.g., "Company Name") + field.String("name"). + MaxLen(255). + NotEmpty(), + + // description: Optional description/help text for the attribute + field.String("description"). + SchemaType(map[string]string{dialect.Postgres: "text"}). + Default(""), + + // type: Attribute type - text, textarea, number, email, url, date, select, multi_select + field.String("type"). + MaxLen(20). + NotEmpty(), + + // options: Select options for select/multi_select types (stored as JSONB) + // Format: [{"value": "xxx", "label": "XXX"}, ...] + field.JSON("options", []map[string]any{}). + Default([]map[string]any{}). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // required: Whether this attribute is required when editing a user + field.Bool("required"). + Default(false), + + // validation: Validation rules for the attribute value (stored as JSONB) + // Format: {"min_length": 1, "max_length": 100, "min": 0, "max": 100, "pattern": "^[a-z]+$", "message": "..."} + field.JSON("validation", map[string]any{}). + Default(map[string]any{}). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // placeholder: Placeholder text shown in input fields + field.String("placeholder"). + MaxLen(255). + Default(""), + + // display_order: Order in which attributes are displayed (lower = first) + field.Int("display_order"). + Default(0), + + // enabled: Whether this attribute is active and shown in forms + field.Bool("enabled"). + Default(true), + } +} + +func (UserAttributeDefinition) Edges() []ent.Edge { + return []ent.Edge{ + // values: All user values for this attribute definition + edge.To("values", UserAttributeValue.Type), + } +} + +func (UserAttributeDefinition) Indexes() []ent.Index { + return []ent.Index{ + // Partial unique index on key (WHERE deleted_at IS NULL) via migration + index.Fields("key"), + index.Fields("enabled"), + index.Fields("display_order"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/user_attribute_value.go b/backend/ent/schema/user_attribute_value.go new file mode 100644 index 00000000..fb9a9727 --- /dev/null +++ b/backend/ent/schema/user_attribute_value.go @@ -0,0 +1,74 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserAttributeValue holds a user's value for a specific attribute. +// +// This entity stores the actual values that users have for each attribute definition. +// Values are stored as strings and converted to the appropriate type by the application. +type UserAttributeValue struct { + ent.Schema +} + +func (UserAttributeValue) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_attribute_values"}, + } +} + +func (UserAttributeValue) Mixin() []ent.Mixin { + return []ent.Mixin{ + // Only use TimeMixin, no soft delete - values are hard deleted + mixins.TimeMixin{}, + } +} + +func (UserAttributeValue) Fields() []ent.Field { + return []ent.Field{ + // user_id: References the user this value belongs to + field.Int64("user_id"), + + // attribute_id: References the attribute definition + field.Int64("attribute_id"), + + // value: The actual value stored as a string + // For multi_select, this is a JSON array string + field.Text("value"). + Default(""), + } +} + +func (UserAttributeValue) Edges() []ent.Edge { + return []ent.Edge{ + // user: The user who owns this attribute value + edge.From("user", User.Type). + Ref("attribute_values"). + Field("user_id"). + Required(). + Unique(), + + // definition: The attribute definition this value is for + edge.From("definition", UserAttributeDefinition.Type). + Ref("values"). + Field("attribute_id"). + Required(). + Unique(), + } +} + +func (UserAttributeValue) Indexes() []ent.Index { + return []ent.Index{ + // Unique index on (user_id, attribute_id) + index.Fields("user_id", "attribute_id").Unique(), + index.Fields("attribute_id"), + } +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go index ecb0409d..b1bbdfc5 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -34,6 +34,10 @@ type Tx struct { User *UserClient // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. UserAllowedGroup *UserAllowedGroupClient + // UserAttributeDefinition is the client for interacting with the UserAttributeDefinition builders. + UserAttributeDefinition *UserAttributeDefinitionClient + // UserAttributeValue is the client for interacting with the UserAttributeValue builders. + UserAttributeValue *UserAttributeValueClient // UserSubscription is the client for interacting with the UserSubscription builders. UserSubscription *UserSubscriptionClient @@ -177,6 +181,8 @@ func (tx *Tx) init() { tx.UsageLog = NewUsageLogClient(tx.config) tx.User = NewUserClient(tx.config) tx.UserAllowedGroup = NewUserAllowedGroupClient(tx.config) + tx.UserAttributeDefinition = NewUserAttributeDefinitionClient(tx.config) + tx.UserAttributeValue = NewUserAttributeValueClient(tx.config) tx.UserSubscription = NewUserSubscriptionClient(tx.config) } diff --git a/backend/ent/userattributedefinition.go b/backend/ent/userattributedefinition.go new file mode 100644 index 00000000..2ed86e4e --- /dev/null +++ b/backend/ent/userattributedefinition.go @@ -0,0 +1,276 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" +) + +// UserAttributeDefinition is the model entity for the UserAttributeDefinition schema. +type UserAttributeDefinition struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Options holds the value of the "options" field. + Options []map[string]interface{} `json:"options,omitempty"` + // Required holds the value of the "required" field. + Required bool `json:"required,omitempty"` + // Validation holds the value of the "validation" field. + Validation map[string]interface{} `json:"validation,omitempty"` + // Placeholder holds the value of the "placeholder" field. + Placeholder string `json:"placeholder,omitempty"` + // DisplayOrder holds the value of the "display_order" field. + DisplayOrder int `json:"display_order,omitempty"` + // Enabled holds the value of the "enabled" field. + Enabled bool `json:"enabled,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAttributeDefinitionQuery when eager-loading is set. + Edges UserAttributeDefinitionEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAttributeDefinitionEdges holds the relations/edges for other nodes in the graph. +type UserAttributeDefinitionEdges struct { + // Values holds the value of the values edge. + Values []*UserAttributeValue `json:"values,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ValuesOrErr returns the Values value or an error if the edge +// was not loaded in eager-loading. +func (e UserAttributeDefinitionEdges) ValuesOrErr() ([]*UserAttributeValue, error) { + if e.loadedTypes[0] { + return e.Values, nil + } + return nil, &NotLoadedError{edge: "values"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UserAttributeDefinition) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userattributedefinition.FieldOptions, userattributedefinition.FieldValidation: + values[i] = new([]byte) + case userattributedefinition.FieldRequired, userattributedefinition.FieldEnabled: + values[i] = new(sql.NullBool) + case userattributedefinition.FieldID, userattributedefinition.FieldDisplayOrder: + values[i] = new(sql.NullInt64) + case userattributedefinition.FieldKey, userattributedefinition.FieldName, userattributedefinition.FieldDescription, userattributedefinition.FieldType, userattributedefinition.FieldPlaceholder: + values[i] = new(sql.NullString) + case userattributedefinition.FieldCreatedAt, userattributedefinition.FieldUpdatedAt, userattributedefinition.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAttributeDefinition fields. +func (_m *UserAttributeDefinition) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userattributedefinition.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case userattributedefinition.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case userattributedefinition.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case userattributedefinition.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case userattributedefinition.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case userattributedefinition.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case userattributedefinition.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + _m.Description = value.String + } + case userattributedefinition.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + _m.Type = value.String + } + case userattributedefinition.FieldOptions: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field options", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Options); err != nil { + return fmt.Errorf("unmarshal field options: %w", err) + } + } + case userattributedefinition.FieldRequired: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field required", values[i]) + } else if value.Valid { + _m.Required = value.Bool + } + case userattributedefinition.FieldValidation: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field validation", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Validation); err != nil { + return fmt.Errorf("unmarshal field validation: %w", err) + } + } + case userattributedefinition.FieldPlaceholder: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field placeholder", values[i]) + } else if value.Valid { + _m.Placeholder = value.String + } + case userattributedefinition.FieldDisplayOrder: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field display_order", values[i]) + } else if value.Valid { + _m.DisplayOrder = int(value.Int64) + } + case userattributedefinition.FieldEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field enabled", values[i]) + } else if value.Valid { + _m.Enabled = value.Bool + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserAttributeDefinition. +// This includes values selected through modifiers, order, etc. +func (_m *UserAttributeDefinition) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryValues queries the "values" edge of the UserAttributeDefinition entity. +func (_m *UserAttributeDefinition) QueryValues() *UserAttributeValueQuery { + return NewUserAttributeDefinitionClient(_m.config).QueryValues(_m) +} + +// Update returns a builder for updating this UserAttributeDefinition. +// Note that you need to call UserAttributeDefinition.Unwrap() before calling this method if this UserAttributeDefinition +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserAttributeDefinition) Update() *UserAttributeDefinitionUpdateOne { + return NewUserAttributeDefinitionClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAttributeDefinition entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserAttributeDefinition) Unwrap() *UserAttributeDefinition { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAttributeDefinition is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserAttributeDefinition) String() string { + var builder strings.Builder + builder.WriteString("UserAttributeDefinition(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(_m.Description) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(_m.Type) + builder.WriteString(", ") + builder.WriteString("options=") + builder.WriteString(fmt.Sprintf("%v", _m.Options)) + builder.WriteString(", ") + builder.WriteString("required=") + builder.WriteString(fmt.Sprintf("%v", _m.Required)) + builder.WriteString(", ") + builder.WriteString("validation=") + builder.WriteString(fmt.Sprintf("%v", _m.Validation)) + builder.WriteString(", ") + builder.WriteString("placeholder=") + builder.WriteString(_m.Placeholder) + builder.WriteString(", ") + builder.WriteString("display_order=") + builder.WriteString(fmt.Sprintf("%v", _m.DisplayOrder)) + builder.WriteString(", ") + builder.WriteString("enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) + builder.WriteByte(')') + return builder.String() +} + +// UserAttributeDefinitions is a parsable slice of UserAttributeDefinition. +type UserAttributeDefinitions []*UserAttributeDefinition diff --git a/backend/ent/userattributedefinition/userattributedefinition.go b/backend/ent/userattributedefinition/userattributedefinition.go new file mode 100644 index 00000000..ce398c03 --- /dev/null +++ b/backend/ent/userattributedefinition/userattributedefinition.go @@ -0,0 +1,205 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributedefinition + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userattributedefinition type in the database. + Label = "user_attribute_definition" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldOptions holds the string denoting the options field in the database. + FieldOptions = "options" + // FieldRequired holds the string denoting the required field in the database. + FieldRequired = "required" + // FieldValidation holds the string denoting the validation field in the database. + FieldValidation = "validation" + // FieldPlaceholder holds the string denoting the placeholder field in the database. + FieldPlaceholder = "placeholder" + // FieldDisplayOrder holds the string denoting the display_order field in the database. + FieldDisplayOrder = "display_order" + // FieldEnabled holds the string denoting the enabled field in the database. + FieldEnabled = "enabled" + // EdgeValues holds the string denoting the values edge name in mutations. + EdgeValues = "values" + // Table holds the table name of the userattributedefinition in the database. + Table = "user_attribute_definitions" + // ValuesTable is the table that holds the values relation/edge. + ValuesTable = "user_attribute_values" + // ValuesInverseTable is the table name for the UserAttributeValue entity. + // It exists in this package in order to avoid circular dependency with the "userattributevalue" package. + ValuesInverseTable = "user_attribute_values" + // ValuesColumn is the table column denoting the values relation/edge. + ValuesColumn = "attribute_id" +) + +// Columns holds all SQL columns for userattributedefinition fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldKey, + FieldName, + FieldDescription, + FieldType, + FieldOptions, + FieldRequired, + FieldValidation, + FieldPlaceholder, + FieldDisplayOrder, + FieldEnabled, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultDescription holds the default value on creation for the "description" field. + DefaultDescription string + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // DefaultOptions holds the default value on creation for the "options" field. + DefaultOptions []map[string]interface{} + // DefaultRequired holds the default value on creation for the "required" field. + DefaultRequired bool + // DefaultValidation holds the default value on creation for the "validation" field. + DefaultValidation map[string]interface{} + // DefaultPlaceholder holds the default value on creation for the "placeholder" field. + DefaultPlaceholder string + // PlaceholderValidator is a validator for the "placeholder" field. It is called by the builders before save. + PlaceholderValidator func(string) error + // DefaultDisplayOrder holds the default value on creation for the "display_order" field. + DefaultDisplayOrder int + // DefaultEnabled holds the default value on creation for the "enabled" field. + DefaultEnabled bool +) + +// OrderOption defines the ordering options for the UserAttributeDefinition queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByRequired orders the results by the required field. +func ByRequired(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRequired, opts...).ToFunc() +} + +// ByPlaceholder orders the results by the placeholder field. +func ByPlaceholder(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlaceholder, opts...).ToFunc() +} + +// ByDisplayOrder orders the results by the display_order field. +func ByDisplayOrder(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisplayOrder, opts...).ToFunc() +} + +// ByEnabled orders the results by the enabled field. +func ByEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnabled, opts...).ToFunc() +} + +// ByValuesCount orders the results by values count. +func ByValuesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newValuesStep(), opts...) + } +} + +// ByValues orders the results by values terms. +func ByValues(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newValuesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newValuesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ValuesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ValuesTable, ValuesColumn), + ) +} diff --git a/backend/ent/userattributedefinition/where.go b/backend/ent/userattributedefinition/where.go new file mode 100644 index 00000000..7f4d06cb --- /dev/null +++ b/backend/ent/userattributedefinition/where.go @@ -0,0 +1,664 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributedefinition + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldKey, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDescription, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldType, v)) +} + +// Required applies equality check predicate on the "required" field. It's identical to RequiredEQ. +func Required(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldRequired, v)) +} + +// Placeholder applies equality check predicate on the "placeholder" field. It's identical to PlaceholderEQ. +func Placeholder(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldPlaceholder, v)) +} + +// DisplayOrder applies equality check predicate on the "display_order" field. It's identical to DisplayOrderEQ. +func DisplayOrder(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDisplayOrder, v)) +} + +// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. +func Enabled(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldEnabled, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotNull(FieldDeletedAt)) +} + +// KeyEQ applies the EQ predicate on the "key" field. +func KeyEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldKey, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldDescription, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldType, v)) +} + +// RequiredEQ applies the EQ predicate on the "required" field. +func RequiredEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldRequired, v)) +} + +// RequiredNEQ applies the NEQ predicate on the "required" field. +func RequiredNEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldRequired, v)) +} + +// PlaceholderEQ applies the EQ predicate on the "placeholder" field. +func PlaceholderEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldPlaceholder, v)) +} + +// PlaceholderNEQ applies the NEQ predicate on the "placeholder" field. +func PlaceholderNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldPlaceholder, v)) +} + +// PlaceholderIn applies the In predicate on the "placeholder" field. +func PlaceholderIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldPlaceholder, vs...)) +} + +// PlaceholderNotIn applies the NotIn predicate on the "placeholder" field. +func PlaceholderNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldPlaceholder, vs...)) +} + +// PlaceholderGT applies the GT predicate on the "placeholder" field. +func PlaceholderGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldPlaceholder, v)) +} + +// PlaceholderGTE applies the GTE predicate on the "placeholder" field. +func PlaceholderGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldPlaceholder, v)) +} + +// PlaceholderLT applies the LT predicate on the "placeholder" field. +func PlaceholderLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldPlaceholder, v)) +} + +// PlaceholderLTE applies the LTE predicate on the "placeholder" field. +func PlaceholderLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldPlaceholder, v)) +} + +// PlaceholderContains applies the Contains predicate on the "placeholder" field. +func PlaceholderContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldPlaceholder, v)) +} + +// PlaceholderHasPrefix applies the HasPrefix predicate on the "placeholder" field. +func PlaceholderHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldPlaceholder, v)) +} + +// PlaceholderHasSuffix applies the HasSuffix predicate on the "placeholder" field. +func PlaceholderHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldPlaceholder, v)) +} + +// PlaceholderEqualFold applies the EqualFold predicate on the "placeholder" field. +func PlaceholderEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldPlaceholder, v)) +} + +// PlaceholderContainsFold applies the ContainsFold predicate on the "placeholder" field. +func PlaceholderContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldPlaceholder, v)) +} + +// DisplayOrderEQ applies the EQ predicate on the "display_order" field. +func DisplayOrderEQ(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDisplayOrder, v)) +} + +// DisplayOrderNEQ applies the NEQ predicate on the "display_order" field. +func DisplayOrderNEQ(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDisplayOrder, v)) +} + +// DisplayOrderIn applies the In predicate on the "display_order" field. +func DisplayOrderIn(vs ...int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldDisplayOrder, vs...)) +} + +// DisplayOrderNotIn applies the NotIn predicate on the "display_order" field. +func DisplayOrderNotIn(vs ...int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDisplayOrder, vs...)) +} + +// DisplayOrderGT applies the GT predicate on the "display_order" field. +func DisplayOrderGT(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldDisplayOrder, v)) +} + +// DisplayOrderGTE applies the GTE predicate on the "display_order" field. +func DisplayOrderGTE(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDisplayOrder, v)) +} + +// DisplayOrderLT applies the LT predicate on the "display_order" field. +func DisplayOrderLT(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldDisplayOrder, v)) +} + +// DisplayOrderLTE applies the LTE predicate on the "display_order" field. +func DisplayOrderLTE(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDisplayOrder, v)) +} + +// EnabledEQ applies the EQ predicate on the "enabled" field. +func EnabledEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldEnabled, v)) +} + +// EnabledNEQ applies the NEQ predicate on the "enabled" field. +func EnabledNEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldEnabled, v)) +} + +// HasValues applies the HasEdge predicate on the "values" edge. +func HasValues() predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ValuesTable, ValuesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasValuesWith applies the HasEdge predicate on the "values" edge with a given conditions (other predicates). +func HasValuesWith(preds ...predicate.UserAttributeValue) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(func(s *sql.Selector) { + step := newValuesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.NotPredicates(p)) +} diff --git a/backend/ent/userattributedefinition_create.go b/backend/ent/userattributedefinition_create.go new file mode 100644 index 00000000..a018c060 --- /dev/null +++ b/backend/ent/userattributedefinition_create.go @@ -0,0 +1,1267 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeDefinitionCreate is the builder for creating a UserAttributeDefinition entity. +type UserAttributeDefinitionCreate struct { + config + mutation *UserAttributeDefinitionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserAttributeDefinitionCreate) SetCreatedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableCreatedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserAttributeDefinitionCreate) SetUpdatedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableUpdatedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *UserAttributeDefinitionCreate) SetDeletedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetKey sets the "key" field. +func (_c *UserAttributeDefinitionCreate) SetKey(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetName sets the "name" field. +func (_c *UserAttributeDefinitionCreate) SetName(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetName(v) + return _c +} + +// SetDescription sets the "description" field. +func (_c *UserAttributeDefinitionCreate) SetDescription(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetDescription(v) + return _c +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDescription(v *string) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDescription(*v) + } + return _c +} + +// SetType sets the "type" field. +func (_c *UserAttributeDefinitionCreate) SetType(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetType(v) + return _c +} + +// SetOptions sets the "options" field. +func (_c *UserAttributeDefinitionCreate) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionCreate { + _c.mutation.SetOptions(v) + return _c +} + +// SetRequired sets the "required" field. +func (_c *UserAttributeDefinitionCreate) SetRequired(v bool) *UserAttributeDefinitionCreate { + _c.mutation.SetRequired(v) + return _c +} + +// SetNillableRequired sets the "required" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableRequired(v *bool) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetRequired(*v) + } + return _c +} + +// SetValidation sets the "validation" field. +func (_c *UserAttributeDefinitionCreate) SetValidation(v map[string]interface{}) *UserAttributeDefinitionCreate { + _c.mutation.SetValidation(v) + return _c +} + +// SetPlaceholder sets the "placeholder" field. +func (_c *UserAttributeDefinitionCreate) SetPlaceholder(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetPlaceholder(v) + return _c +} + +// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillablePlaceholder(v *string) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetPlaceholder(*v) + } + return _c +} + +// SetDisplayOrder sets the "display_order" field. +func (_c *UserAttributeDefinitionCreate) SetDisplayOrder(v int) *UserAttributeDefinitionCreate { + _c.mutation.SetDisplayOrder(v) + return _c +} + +// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDisplayOrder(*v) + } + return _c +} + +// SetEnabled sets the "enabled" field. +func (_c *UserAttributeDefinitionCreate) SetEnabled(v bool) *UserAttributeDefinitionCreate { + _c.mutation.SetEnabled(v) + return _c +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableEnabled(v *bool) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetEnabled(*v) + } + return _c +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs. +func (_c *UserAttributeDefinitionCreate) AddValueIDs(ids ...int64) *UserAttributeDefinitionCreate { + _c.mutation.AddValueIDs(ids...) + return _c +} + +// AddValues adds the "values" edges to the UserAttributeValue entity. +func (_c *UserAttributeDefinitionCreate) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddValueIDs(ids...) +} + +// Mutation returns the UserAttributeDefinitionMutation object of the builder. +func (_c *UserAttributeDefinitionCreate) Mutation() *UserAttributeDefinitionMutation { + return _c.mutation +} + +// Save creates the UserAttributeDefinition in the database. +func (_c *UserAttributeDefinitionCreate) Save(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAttributeDefinitionCreate) SaveX(ctx context.Context) *UserAttributeDefinition { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeDefinitionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeDefinitionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAttributeDefinitionCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if userattributedefinition.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if userattributedefinition.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Description(); !ok { + v := userattributedefinition.DefaultDescription + _c.mutation.SetDescription(v) + } + if _, ok := _c.mutation.Options(); !ok { + v := userattributedefinition.DefaultOptions + _c.mutation.SetOptions(v) + } + if _, ok := _c.mutation.Required(); !ok { + v := userattributedefinition.DefaultRequired + _c.mutation.SetRequired(v) + } + if _, ok := _c.mutation.Validation(); !ok { + v := userattributedefinition.DefaultValidation + _c.mutation.SetValidation(v) + } + if _, ok := _c.mutation.Placeholder(); !ok { + v := userattributedefinition.DefaultPlaceholder + _c.mutation.SetPlaceholder(v) + } + if _, ok := _c.mutation.DisplayOrder(); !ok { + v := userattributedefinition.DefaultDisplayOrder + _c.mutation.SetDisplayOrder(v) + } + if _, ok := _c.mutation.Enabled(); !ok { + v := userattributedefinition.DefaultEnabled + _c.mutation.SetEnabled(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UserAttributeDefinitionCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAttributeDefinition.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserAttributeDefinition.updated_at"`)} + } + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "UserAttributeDefinition.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "UserAttributeDefinition.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if _, ok := _c.mutation.Description(); !ok { + return &ValidationError{Name: "description", err: errors.New(`ent: missing required field "UserAttributeDefinition.description"`)} + } + if _, ok := _c.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "UserAttributeDefinition.type"`)} + } + if v, ok := _c.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if _, ok := _c.mutation.Options(); !ok { + return &ValidationError{Name: "options", err: errors.New(`ent: missing required field "UserAttributeDefinition.options"`)} + } + if _, ok := _c.mutation.Required(); !ok { + return &ValidationError{Name: "required", err: errors.New(`ent: missing required field "UserAttributeDefinition.required"`)} + } + if _, ok := _c.mutation.Validation(); !ok { + return &ValidationError{Name: "validation", err: errors.New(`ent: missing required field "UserAttributeDefinition.validation"`)} + } + if _, ok := _c.mutation.Placeholder(); !ok { + return &ValidationError{Name: "placeholder", err: errors.New(`ent: missing required field "UserAttributeDefinition.placeholder"`)} + } + if v, ok := _c.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + if _, ok := _c.mutation.DisplayOrder(); !ok { + return &ValidationError{Name: "display_order", err: errors.New(`ent: missing required field "UserAttributeDefinition.display_order"`)} + } + if _, ok := _c.mutation.Enabled(); !ok { + return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "UserAttributeDefinition.enabled"`)} + } + return nil +} + +func (_c *UserAttributeDefinitionCreate) sqlSave(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserAttributeDefinitionCreate) createSpec() (*UserAttributeDefinition, *sqlgraph.CreateSpec) { + var ( + _node = &UserAttributeDefinition{config: _c.config} + _spec = sqlgraph.NewCreateSpec(userattributedefinition.Table, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(userattributedefinition.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Key(); ok { + _spec.SetField(userattributedefinition.FieldKey, field.TypeString, value) + _node.Key = value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(userattributedefinition.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Description(); ok { + _spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := _c.mutation.GetType(); ok { + _spec.SetField(userattributedefinition.FieldType, field.TypeString, value) + _node.Type = value + } + if value, ok := _c.mutation.Options(); ok { + _spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value) + _node.Options = value + } + if value, ok := _c.mutation.Required(); ok { + _spec.SetField(userattributedefinition.FieldRequired, field.TypeBool, value) + _node.Required = value + } + if value, ok := _c.mutation.Validation(); ok { + _spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value) + _node.Validation = value + } + if value, ok := _c.mutation.Placeholder(); ok { + _spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value) + _node.Placeholder = value + } + if value, ok := _c.mutation.DisplayOrder(); ok { + _spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + _node.DisplayOrder = value + } + if value, ok := _c.mutation.Enabled(); ok { + _spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value) + _node.Enabled = value + } + if nodes := _c.mutation.ValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAttributeDefinition.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAttributeDefinitionUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserAttributeDefinitionCreate) OnConflict(opts ...sql.ConflictOption) *UserAttributeDefinitionUpsertOne { + _c.conflict = opts + return &UserAttributeDefinitionUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAttributeDefinitionCreate) OnConflictColumns(columns ...string) *UserAttributeDefinitionUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeDefinitionUpsertOne{ + create: _c, + } +} + +type ( + // UserAttributeDefinitionUpsertOne is the builder for "upsert"-ing + // one UserAttributeDefinition node. + UserAttributeDefinitionUpsertOne struct { + create *UserAttributeDefinitionCreate + } + + // UserAttributeDefinitionUpsert is the "OnConflict" setter. + UserAttributeDefinitionUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsert) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateUpdatedAt() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsert) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateDeletedAt() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserAttributeDefinitionUpsert) ClearDeletedAt() *UserAttributeDefinitionUpsert { + u.SetNull(userattributedefinition.FieldDeletedAt) + return u +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsert) SetKey(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateKey() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldKey) + return u +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsert) SetName(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateName() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsert) SetDescription(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateDescription() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDescription) + return u +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsert) SetType(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldType, v) + return u +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateType() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldType) + return u +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsert) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldOptions, v) + return u +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateOptions() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldOptions) + return u +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsert) SetRequired(v bool) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldRequired, v) + return u +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateRequired() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldRequired) + return u +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsert) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldValidation, v) + return u +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateValidation() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldValidation) + return u +} + +// SetPlaceholder sets the "placeholder" field. +func (u *UserAttributeDefinitionUpsert) SetPlaceholder(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldPlaceholder, v) + return u +} + +// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdatePlaceholder() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldPlaceholder) + return u +} + +// SetDisplayOrder sets the "display_order" field. +func (u *UserAttributeDefinitionUpsert) SetDisplayOrder(v int) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDisplayOrder, v) + return u +} + +// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateDisplayOrder() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDisplayOrder) + return u +} + +// AddDisplayOrder adds v to the "display_order" field. +func (u *UserAttributeDefinitionUpsert) AddDisplayOrder(v int) *UserAttributeDefinitionUpsert { + u.Add(userattributedefinition.FieldDisplayOrder, v) + return u +} + +// SetEnabled sets the "enabled" field. +func (u *UserAttributeDefinitionUpsert) SetEnabled(v bool) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldEnabled, v) + return u +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateEnabled() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldEnabled) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertOne) UpdateNewValues() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(userattributedefinition.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertOne) Ignore() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeDefinitionUpsertOne) DoNothing() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeDefinitionCreate.OnConflict +// documentation for more info. +func (u *UserAttributeDefinitionUpsertOne) Update(set func(*UserAttributeDefinitionUpsert)) *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeDefinitionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsertOne) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateUpdatedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertOne) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateDeletedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertOne) ClearDeletedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsertOne) SetKey(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateKey() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsertOne) SetName(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateName() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsertOne) SetDescription(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateDescription() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDescription() + }) +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsertOne) SetType(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateType() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateType() + }) +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsertOne) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetOptions(v) + }) +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateOptions() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateOptions() + }) +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsertOne) SetRequired(v bool) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetRequired(v) + }) +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateRequired() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateRequired() + }) +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsertOne) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetValidation(v) + }) +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateValidation() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateValidation() + }) +} + +// SetPlaceholder sets the "placeholder" field. +func (u *UserAttributeDefinitionUpsertOne) SetPlaceholder(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetPlaceholder(v) + }) +} + +// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdatePlaceholder() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdatePlaceholder() + }) +} + +// SetDisplayOrder sets the "display_order" field. +func (u *UserAttributeDefinitionUpsertOne) SetDisplayOrder(v int) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDisplayOrder(v) + }) +} + +// AddDisplayOrder adds v to the "display_order" field. +func (u *UserAttributeDefinitionUpsertOne) AddDisplayOrder(v int) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.AddDisplayOrder(v) + }) +} + +// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateDisplayOrder() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDisplayOrder() + }) +} + +// SetEnabled sets the "enabled" field. +func (u *UserAttributeDefinitionUpsertOne) SetEnabled(v bool) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetEnabled(v) + }) +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateEnabled() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateEnabled() + }) +} + +// Exec executes the query. +func (u *UserAttributeDefinitionUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAttributeDefinitionCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAttributeDefinitionUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserAttributeDefinitionUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserAttributeDefinitionUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserAttributeDefinitionCreateBulk is the builder for creating many UserAttributeDefinition entities in bulk. +type UserAttributeDefinitionCreateBulk struct { + config + err error + builders []*UserAttributeDefinitionCreate + conflict []sql.ConflictOption +} + +// Save creates the UserAttributeDefinition entities in the database. +func (_c *UserAttributeDefinitionCreateBulk) Save(ctx context.Context) ([]*UserAttributeDefinition, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserAttributeDefinition, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserAttributeDefinitionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserAttributeDefinitionCreateBulk) SaveX(ctx context.Context) []*UserAttributeDefinition { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeDefinitionCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeDefinitionCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAttributeDefinition.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAttributeDefinitionUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserAttributeDefinitionCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAttributeDefinitionUpsertBulk { + _c.conflict = opts + return &UserAttributeDefinitionUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAttributeDefinitionCreateBulk) OnConflictColumns(columns ...string) *UserAttributeDefinitionUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeDefinitionUpsertBulk{ + create: _c, + } +} + +// UserAttributeDefinitionUpsertBulk is the builder for "upsert"-ing +// a bulk of UserAttributeDefinition nodes. +type UserAttributeDefinitionUpsertBulk struct { + create *UserAttributeDefinitionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertBulk) UpdateNewValues() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(userattributedefinition.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertBulk) Ignore() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeDefinitionUpsertBulk) DoNothing() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeDefinitionCreateBulk.OnConflict +// documentation for more info. +func (u *UserAttributeDefinitionUpsertBulk) Update(set func(*UserAttributeDefinitionUpsert)) *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeDefinitionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsertBulk) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateUpdatedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDeletedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertBulk) ClearDeletedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsertBulk) SetKey(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateKey() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsertBulk) SetName(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateName() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDescription(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDescription() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDescription() + }) +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsertBulk) SetType(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateType() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateType() + }) +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsertBulk) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetOptions(v) + }) +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateOptions() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateOptions() + }) +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsertBulk) SetRequired(v bool) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetRequired(v) + }) +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateRequired() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateRequired() + }) +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsertBulk) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetValidation(v) + }) +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateValidation() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateValidation() + }) +} + +// SetPlaceholder sets the "placeholder" field. +func (u *UserAttributeDefinitionUpsertBulk) SetPlaceholder(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetPlaceholder(v) + }) +} + +// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdatePlaceholder() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdatePlaceholder() + }) +} + +// SetDisplayOrder sets the "display_order" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDisplayOrder(v int) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDisplayOrder(v) + }) +} + +// AddDisplayOrder adds v to the "display_order" field. +func (u *UserAttributeDefinitionUpsertBulk) AddDisplayOrder(v int) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.AddDisplayOrder(v) + }) +} + +// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDisplayOrder() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDisplayOrder() + }) +} + +// SetEnabled sets the "enabled" field. +func (u *UserAttributeDefinitionUpsertBulk) SetEnabled(v bool) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetEnabled(v) + }) +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateEnabled() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateEnabled() + }) +} + +// Exec executes the query. +func (u *UserAttributeDefinitionUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAttributeDefinitionCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAttributeDefinitionCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAttributeDefinitionUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userattributedefinition_delete.go b/backend/ent/userattributedefinition_delete.go new file mode 100644 index 00000000..8d879eb5 --- /dev/null +++ b/backend/ent/userattributedefinition_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" +) + +// UserAttributeDefinitionDelete is the builder for deleting a UserAttributeDefinition entity. +type UserAttributeDefinitionDelete struct { + config + hooks []Hook + mutation *UserAttributeDefinitionMutation +} + +// Where appends a list predicates to the UserAttributeDefinitionDelete builder. +func (_d *UserAttributeDefinitionDelete) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UserAttributeDefinitionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserAttributeDefinitionDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserAttributeDefinitionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(userattributedefinition.Table, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserAttributeDefinitionDeleteOne is the builder for deleting a single UserAttributeDefinition entity. +type UserAttributeDefinitionDeleteOne struct { + _d *UserAttributeDefinitionDelete +} + +// Where appends a list predicates to the UserAttributeDefinitionDelete builder. +func (_d *UserAttributeDefinitionDeleteOne) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserAttributeDefinitionDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{userattributedefinition.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserAttributeDefinitionDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userattributedefinition_query.go b/backend/ent/userattributedefinition_query.go new file mode 100644 index 00000000..9022d306 --- /dev/null +++ b/backend/ent/userattributedefinition_query.go @@ -0,0 +1,606 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeDefinitionQuery is the builder for querying UserAttributeDefinition entities. +type UserAttributeDefinitionQuery struct { + config + ctx *QueryContext + order []userattributedefinition.OrderOption + inters []Interceptor + predicates []predicate.UserAttributeDefinition + withValues *UserAttributeValueQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserAttributeDefinitionQuery builder. +func (_q *UserAttributeDefinitionQuery) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserAttributeDefinitionQuery) Limit(limit int) *UserAttributeDefinitionQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserAttributeDefinitionQuery) Offset(offset int) *UserAttributeDefinitionQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserAttributeDefinitionQuery) Unique(unique bool) *UserAttributeDefinitionQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserAttributeDefinitionQuery) Order(o ...userattributedefinition.OrderOption) *UserAttributeDefinitionQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryValues chains the current query on the "values" edge. +func (_q *UserAttributeDefinitionQuery) QueryValues() *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributedefinition.Table, userattributedefinition.FieldID, selector), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, userattributedefinition.ValuesTable, userattributedefinition.ValuesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAttributeDefinition entity from the query. +// Returns a *NotFoundError when no UserAttributeDefinition was found. +func (_q *UserAttributeDefinitionQuery) First(ctx context.Context) (*UserAttributeDefinition, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userattributedefinition.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) FirstX(ctx context.Context) *UserAttributeDefinition { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserAttributeDefinition ID from the query. +// Returns a *NotFoundError when no UserAttributeDefinition ID was found. +func (_q *UserAttributeDefinitionQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{userattributedefinition.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserAttributeDefinition entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserAttributeDefinition entity is found. +// Returns a *NotFoundError when no UserAttributeDefinition entities are found. +func (_q *UserAttributeDefinitionQuery) Only(ctx context.Context) (*UserAttributeDefinition, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userattributedefinition.Label} + default: + return nil, &NotSingularError{userattributedefinition.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) OnlyX(ctx context.Context) *UserAttributeDefinition { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserAttributeDefinition ID in the query. +// Returns a *NotSingularError when more than one UserAttributeDefinition ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserAttributeDefinitionQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{userattributedefinition.Label} + default: + err = &NotSingularError{userattributedefinition.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserAttributeDefinitions. +func (_q *UserAttributeDefinitionQuery) All(ctx context.Context) ([]*UserAttributeDefinition, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAttributeDefinition, *UserAttributeDefinitionQuery]() + return withInterceptors[[]*UserAttributeDefinition](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) AllX(ctx context.Context) []*UserAttributeDefinition { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserAttributeDefinition IDs. +func (_q *UserAttributeDefinitionQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(userattributedefinition.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserAttributeDefinitionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAttributeDefinitionQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserAttributeDefinitionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserAttributeDefinitionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserAttributeDefinitionQuery) Clone() *UserAttributeDefinitionQuery { + if _q == nil { + return nil + } + return &UserAttributeDefinitionQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]userattributedefinition.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserAttributeDefinition{}, _q.predicates...), + withValues: _q.withValues.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithValues tells the query-builder to eager-load the nodes that are connected to +// the "values" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAttributeDefinitionQuery) WithValues(opts ...func(*UserAttributeValueQuery)) *UserAttributeDefinitionQuery { + query := (&UserAttributeValueClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withValues = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserAttributeDefinition.Query(). +// GroupBy(userattributedefinition.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserAttributeDefinitionQuery) GroupBy(field string, fields ...string) *UserAttributeDefinitionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserAttributeDefinitionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = userattributedefinition.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UserAttributeDefinition.Query(). +// Select(userattributedefinition.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UserAttributeDefinitionQuery) Select(fields ...string) *UserAttributeDefinitionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserAttributeDefinitionSelect{UserAttributeDefinitionQuery: _q} + sbuild.label = userattributedefinition.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserAttributeDefinitionSelect configured with the given aggregations. +func (_q *UserAttributeDefinitionQuery) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAttributeDefinitionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userattributedefinition.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAttributeDefinitionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAttributeDefinition, error) { + var ( + nodes = []*UserAttributeDefinition{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withValues != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAttributeDefinition).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAttributeDefinition{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withValues; query != nil { + if err := _q.loadValues(ctx, query, nodes, + func(n *UserAttributeDefinition) { n.Edges.Values = []*UserAttributeValue{} }, + func(n *UserAttributeDefinition, e *UserAttributeValue) { n.Edges.Values = append(n.Edges.Values, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAttributeDefinitionQuery) loadValues(ctx context.Context, query *UserAttributeValueQuery, nodes []*UserAttributeDefinition, init func(*UserAttributeDefinition), assign func(*UserAttributeDefinition, *UserAttributeValue)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*UserAttributeDefinition) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userattributevalue.FieldAttributeID) + } + query.Where(predicate.UserAttributeValue(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(userattributedefinition.ValuesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AttributeID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "attribute_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *UserAttributeDefinitionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserAttributeDefinitionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(userattributedefinition.Table, userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributedefinition.FieldID) + for i := range fields { + if fields[i] != userattributedefinition.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(userattributedefinition.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = userattributedefinition.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserAttributeDefinitionGroupBy is the group-by builder for UserAttributeDefinition entities. +type UserAttributeDefinitionGroupBy struct { + selector + build *UserAttributeDefinitionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserAttributeDefinitionGroupBy) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserAttributeDefinitionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeDefinitionQuery, *UserAttributeDefinitionGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAttributeDefinitionGroupBy) sqlScan(ctx context.Context, root *UserAttributeDefinitionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAttributeDefinitionSelect is the builder for selecting fields of UserAttributeDefinition entities. +type UserAttributeDefinitionSelect struct { + *UserAttributeDefinitionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAttributeDefinitionSelect) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserAttributeDefinitionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeDefinitionQuery, *UserAttributeDefinitionSelect](ctx, _s.UserAttributeDefinitionQuery, _s, _s.inters, v) +} + +func (_s *UserAttributeDefinitionSelect) sqlScan(ctx context.Context, root *UserAttributeDefinitionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/userattributedefinition_update.go b/backend/ent/userattributedefinition_update.go new file mode 100644 index 00000000..6b9eb7d0 --- /dev/null +++ b/backend/ent/userattributedefinition_update.go @@ -0,0 +1,846 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeDefinitionUpdate is the builder for updating UserAttributeDefinition entities. +type UserAttributeDefinitionUpdate struct { + config + hooks []Hook + mutation *UserAttributeDefinitionMutation +} + +// Where appends a list predicates to the UserAttributeDefinitionUpdate builder. +func (_u *UserAttributeDefinitionUpdate) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserAttributeDefinitionUpdate) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdate) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdate) ClearDeletedAt() *UserAttributeDefinitionUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetKey sets the "key" field. +func (_u *UserAttributeDefinitionUpdate) SetKey(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableKey(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *UserAttributeDefinitionUpdate) SetName(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableName(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *UserAttributeDefinitionUpdate) SetDescription(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableDescription(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *UserAttributeDefinitionUpdate) SetType(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableType(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetOptions sets the "options" field. +func (_u *UserAttributeDefinitionUpdate) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdate { + _u.mutation.SetOptions(v) + return _u +} + +// AppendOptions appends value to the "options" field. +func (_u *UserAttributeDefinitionUpdate) AppendOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdate { + _u.mutation.AppendOptions(v) + return _u +} + +// SetRequired sets the "required" field. +func (_u *UserAttributeDefinitionUpdate) SetRequired(v bool) *UserAttributeDefinitionUpdate { + _u.mutation.SetRequired(v) + return _u +} + +// SetNillableRequired sets the "required" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableRequired(v *bool) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetRequired(*v) + } + return _u +} + +// SetValidation sets the "validation" field. +func (_u *UserAttributeDefinitionUpdate) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpdate { + _u.mutation.SetValidation(v) + return _u +} + +// SetPlaceholder sets the "placeholder" field. +func (_u *UserAttributeDefinitionUpdate) SetPlaceholder(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetPlaceholder(v) + return _u +} + +// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillablePlaceholder(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetPlaceholder(*v) + } + return _u +} + +// SetDisplayOrder sets the "display_order" field. +func (_u *UserAttributeDefinitionUpdate) SetDisplayOrder(v int) *UserAttributeDefinitionUpdate { + _u.mutation.ResetDisplayOrder() + _u.mutation.SetDisplayOrder(v) + return _u +} + +// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetDisplayOrder(*v) + } + return _u +} + +// AddDisplayOrder adds value to the "display_order" field. +func (_u *UserAttributeDefinitionUpdate) AddDisplayOrder(v int) *UserAttributeDefinitionUpdate { + _u.mutation.AddDisplayOrder(v) + return _u +} + +// SetEnabled sets the "enabled" field. +func (_u *UserAttributeDefinitionUpdate) SetEnabled(v bool) *UserAttributeDefinitionUpdate { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableEnabled(v *bool) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs. +func (_u *UserAttributeDefinitionUpdate) AddValueIDs(ids ...int64) *UserAttributeDefinitionUpdate { + _u.mutation.AddValueIDs(ids...) + return _u +} + +// AddValues adds the "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdate) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddValueIDs(ids...) +} + +// Mutation returns the UserAttributeDefinitionMutation object of the builder. +func (_u *UserAttributeDefinitionUpdate) Mutation() *UserAttributeDefinitionMutation { + return _u.mutation +} + +// ClearValues clears all "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdate) ClearValues() *UserAttributeDefinitionUpdate { + _u.mutation.ClearValues() + return _u +} + +// RemoveValueIDs removes the "values" edge to UserAttributeValue entities by IDs. +func (_u *UserAttributeDefinitionUpdate) RemoveValueIDs(ids ...int64) *UserAttributeDefinitionUpdate { + _u.mutation.RemoveValueIDs(ids...) + return _u +} + +// RemoveValues removes "values" edges to UserAttributeValue entities. +func (_u *UserAttributeDefinitionUpdate) RemoveValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveValueIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserAttributeDefinitionUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAttributeDefinitionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeDefinitionUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if userattributedefinition.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeDefinitionUpdate) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if v, ok := _u.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + return nil +} + +func (_u *UserAttributeDefinitionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributedefinition.Table, userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(userattributedefinition.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(userattributedefinition.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(userattributedefinition.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(userattributedefinition.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Options(); ok { + _spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedOptions(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, userattributedefinition.FieldOptions, value) + }) + } + if value, ok := _u.mutation.Required(); ok { + _spec.SetField(userattributedefinition.FieldRequired, field.TypeBool, value) + } + if value, ok := _u.mutation.Validation(); ok { + _spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value) + } + if value, ok := _u.mutation.Placeholder(); ok { + _spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value) + } + if value, ok := _u.mutation.DisplayOrder(); ok { + _spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDisplayOrder(); ok { + _spec.AddField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value) + } + if _u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedValuesIDs(); len(nodes) > 0 && !_u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributedefinition.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAttributeDefinitionUpdateOne is the builder for updating a single UserAttributeDefinition entity. +type UserAttributeDefinitionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAttributeDefinitionMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserAttributeDefinitionUpdateOne) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdateOne) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdateOne) ClearDeletedAt() *UserAttributeDefinitionUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetKey sets the "key" field. +func (_u *UserAttributeDefinitionUpdateOne) SetKey(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableKey(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *UserAttributeDefinitionUpdateOne) SetName(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableName(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *UserAttributeDefinitionUpdateOne) SetDescription(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableDescription(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *UserAttributeDefinitionUpdateOne) SetType(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableType(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetOptions sets the "options" field. +func (_u *UserAttributeDefinitionUpdateOne) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetOptions(v) + return _u +} + +// AppendOptions appends value to the "options" field. +func (_u *UserAttributeDefinitionUpdateOne) AppendOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdateOne { + _u.mutation.AppendOptions(v) + return _u +} + +// SetRequired sets the "required" field. +func (_u *UserAttributeDefinitionUpdateOne) SetRequired(v bool) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetRequired(v) + return _u +} + +// SetNillableRequired sets the "required" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableRequired(v *bool) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetRequired(*v) + } + return _u +} + +// SetValidation sets the "validation" field. +func (_u *UserAttributeDefinitionUpdateOne) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetValidation(v) + return _u +} + +// SetPlaceholder sets the "placeholder" field. +func (_u *UserAttributeDefinitionUpdateOne) SetPlaceholder(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetPlaceholder(v) + return _u +} + +// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillablePlaceholder(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetPlaceholder(*v) + } + return _u +} + +// SetDisplayOrder sets the "display_order" field. +func (_u *UserAttributeDefinitionUpdateOne) SetDisplayOrder(v int) *UserAttributeDefinitionUpdateOne { + _u.mutation.ResetDisplayOrder() + _u.mutation.SetDisplayOrder(v) + return _u +} + +// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetDisplayOrder(*v) + } + return _u +} + +// AddDisplayOrder adds value to the "display_order" field. +func (_u *UserAttributeDefinitionUpdateOne) AddDisplayOrder(v int) *UserAttributeDefinitionUpdateOne { + _u.mutation.AddDisplayOrder(v) + return _u +} + +// SetEnabled sets the "enabled" field. +func (_u *UserAttributeDefinitionUpdateOne) SetEnabled(v bool) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableEnabled(v *bool) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs. +func (_u *UserAttributeDefinitionUpdateOne) AddValueIDs(ids ...int64) *UserAttributeDefinitionUpdateOne { + _u.mutation.AddValueIDs(ids...) + return _u +} + +// AddValues adds the "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdateOne) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddValueIDs(ids...) +} + +// Mutation returns the UserAttributeDefinitionMutation object of the builder. +func (_u *UserAttributeDefinitionUpdateOne) Mutation() *UserAttributeDefinitionMutation { + return _u.mutation +} + +// ClearValues clears all "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdateOne) ClearValues() *UserAttributeDefinitionUpdateOne { + _u.mutation.ClearValues() + return _u +} + +// RemoveValueIDs removes the "values" edge to UserAttributeValue entities by IDs. +func (_u *UserAttributeDefinitionUpdateOne) RemoveValueIDs(ids ...int64) *UserAttributeDefinitionUpdateOne { + _u.mutation.RemoveValueIDs(ids...) + return _u +} + +// RemoveValues removes "values" edges to UserAttributeValue entities. +func (_u *UserAttributeDefinitionUpdateOne) RemoveValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveValueIDs(ids...) +} + +// Where appends a list predicates to the UserAttributeDefinitionUpdate builder. +func (_u *UserAttributeDefinitionUpdateOne) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserAttributeDefinitionUpdateOne) Select(field string, fields ...string) *UserAttributeDefinitionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserAttributeDefinition entity. +func (_u *UserAttributeDefinitionUpdateOne) Save(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdateOne) SaveX(ctx context.Context) *UserAttributeDefinition { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAttributeDefinitionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeDefinitionUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if userattributedefinition.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeDefinitionUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if v, ok := _u.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + return nil +} + +func (_u *UserAttributeDefinitionUpdateOne) sqlSave(ctx context.Context) (_node *UserAttributeDefinition, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributedefinition.Table, userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserAttributeDefinition.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributedefinition.FieldID) + for _, f := range fields { + if !userattributedefinition.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != userattributedefinition.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(userattributedefinition.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(userattributedefinition.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(userattributedefinition.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(userattributedefinition.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Options(); ok { + _spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedOptions(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, userattributedefinition.FieldOptions, value) + }) + } + if value, ok := _u.mutation.Required(); ok { + _spec.SetField(userattributedefinition.FieldRequired, field.TypeBool, value) + } + if value, ok := _u.mutation.Validation(); ok { + _spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value) + } + if value, ok := _u.mutation.Placeholder(); ok { + _spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value) + } + if value, ok := _u.mutation.DisplayOrder(); ok { + _spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDisplayOrder(); ok { + _spec.AddField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value) + } + if _u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedValuesIDs(); len(nodes) > 0 && !_u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserAttributeDefinition{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributedefinition.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/userattributevalue.go b/backend/ent/userattributevalue.go new file mode 100644 index 00000000..8dced925 --- /dev/null +++ b/backend/ent/userattributevalue.go @@ -0,0 +1,198 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValue is the model entity for the UserAttributeValue schema. +type UserAttributeValue struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // AttributeID holds the value of the "attribute_id" field. + AttributeID int64 `json:"attribute_id,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAttributeValueQuery when eager-loading is set. + Edges UserAttributeValueEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAttributeValueEdges holds the relations/edges for other nodes in the graph. +type UserAttributeValueEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Definition holds the value of the definition edge. + Definition *UserAttributeDefinition `json:"definition,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAttributeValueEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// DefinitionOrErr returns the Definition value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAttributeValueEdges) DefinitionOrErr() (*UserAttributeDefinition, error) { + if e.Definition != nil { + return e.Definition, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: userattributedefinition.Label} + } + return nil, &NotLoadedError{edge: "definition"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UserAttributeValue) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userattributevalue.FieldID, userattributevalue.FieldUserID, userattributevalue.FieldAttributeID: + values[i] = new(sql.NullInt64) + case userattributevalue.FieldValue: + values[i] = new(sql.NullString) + case userattributevalue.FieldCreatedAt, userattributevalue.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAttributeValue fields. +func (_m *UserAttributeValue) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userattributevalue.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case userattributevalue.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case userattributevalue.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case userattributevalue.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case userattributevalue.FieldAttributeID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field attribute_id", values[i]) + } else if value.Valid { + _m.AttributeID = value.Int64 + } + case userattributevalue.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the UserAttributeValue. +// This includes values selected through modifiers, order, etc. +func (_m *UserAttributeValue) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserAttributeValue entity. +func (_m *UserAttributeValue) QueryUser() *UserQuery { + return NewUserAttributeValueClient(_m.config).QueryUser(_m) +} + +// QueryDefinition queries the "definition" edge of the UserAttributeValue entity. +func (_m *UserAttributeValue) QueryDefinition() *UserAttributeDefinitionQuery { + return NewUserAttributeValueClient(_m.config).QueryDefinition(_m) +} + +// Update returns a builder for updating this UserAttributeValue. +// Note that you need to call UserAttributeValue.Unwrap() before calling this method if this UserAttributeValue +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserAttributeValue) Update() *UserAttributeValueUpdateOne { + return NewUserAttributeValueClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAttributeValue entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserAttributeValue) Unwrap() *UserAttributeValue { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAttributeValue is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserAttributeValue) String() string { + var builder strings.Builder + builder.WriteString("UserAttributeValue(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("attribute_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AttributeID)) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(_m.Value) + builder.WriteByte(')') + return builder.String() +} + +// UserAttributeValues is a parsable slice of UserAttributeValue. +type UserAttributeValues []*UserAttributeValue diff --git a/backend/ent/userattributevalue/userattributevalue.go b/backend/ent/userattributevalue/userattributevalue.go new file mode 100644 index 00000000..b8bb5842 --- /dev/null +++ b/backend/ent/userattributevalue/userattributevalue.go @@ -0,0 +1,139 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributevalue + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userattributevalue type in the database. + Label = "user_attribute_value" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldAttributeID holds the string denoting the attribute_id field in the database. + FieldAttributeID = "attribute_id" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeDefinition holds the string denoting the definition edge name in mutations. + EdgeDefinition = "definition" + // Table holds the table name of the userattributevalue in the database. + Table = "user_attribute_values" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_attribute_values" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // DefinitionTable is the table that holds the definition relation/edge. + DefinitionTable = "user_attribute_values" + // DefinitionInverseTable is the table name for the UserAttributeDefinition entity. + // It exists in this package in order to avoid circular dependency with the "userattributedefinition" package. + DefinitionInverseTable = "user_attribute_definitions" + // DefinitionColumn is the table column denoting the definition relation/edge. + DefinitionColumn = "attribute_id" +) + +// Columns holds all SQL columns for userattributevalue fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldUserID, + FieldAttributeID, + FieldValue, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultValue holds the default value on creation for the "value" field. + DefaultValue string +) + +// OrderOption defines the ordering options for the UserAttributeValue queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByAttributeID orders the results by the attribute_id field. +func ByAttributeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAttributeID, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByDefinitionField orders the results by definition field. +func ByDefinitionField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDefinitionStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newDefinitionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DefinitionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DefinitionTable, DefinitionColumn), + ) +} diff --git a/backend/ent/userattributevalue/where.go b/backend/ent/userattributevalue/where.go new file mode 100644 index 00000000..43c3213e --- /dev/null +++ b/backend/ent/userattributevalue/where.go @@ -0,0 +1,327 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributevalue + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUserID, v)) +} + +// AttributeID applies equality check predicate on the "attribute_id" field. It's identical to AttributeIDEQ. +func AttributeID(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldAttributeID, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldValue, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldUserID, vs...)) +} + +// AttributeIDEQ applies the EQ predicate on the "attribute_id" field. +func AttributeIDEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldAttributeID, v)) +} + +// AttributeIDNEQ applies the NEQ predicate on the "attribute_id" field. +func AttributeIDNEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldAttributeID, v)) +} + +// AttributeIDIn applies the In predicate on the "attribute_id" field. +func AttributeIDIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldAttributeID, vs...)) +} + +// AttributeIDNotIn applies the NotIn predicate on the "attribute_id" field. +func AttributeIDNotIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldAttributeID, vs...)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldContainsFold(FieldValue, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.UserAttributeValue { + return predicate.UserAttributeValue(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.UserAttributeValue { + return predicate.UserAttributeValue(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDefinition applies the HasEdge predicate on the "definition" edge. +func HasDefinition() predicate.UserAttributeValue { + return predicate.UserAttributeValue(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DefinitionTable, DefinitionColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDefinitionWith applies the HasEdge predicate on the "definition" edge with a given conditions (other predicates). +func HasDefinitionWith(preds ...predicate.UserAttributeDefinition) predicate.UserAttributeValue { + return predicate.UserAttributeValue(func(s *sql.Selector) { + step := newDefinitionStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserAttributeValue) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserAttributeValue) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserAttributeValue) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.NotPredicates(p)) +} diff --git a/backend/ent/userattributevalue_create.go b/backend/ent/userattributevalue_create.go new file mode 100644 index 00000000..c52481dc --- /dev/null +++ b/backend/ent/userattributevalue_create.go @@ -0,0 +1,731 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValueCreate is the builder for creating a UserAttributeValue entity. +type UserAttributeValueCreate struct { + config + mutation *UserAttributeValueMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserAttributeValueCreate) SetCreatedAt(v time.Time) *UserAttributeValueCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserAttributeValueCreate) SetNillableCreatedAt(v *time.Time) *UserAttributeValueCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserAttributeValueCreate) SetUpdatedAt(v time.Time) *UserAttributeValueCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserAttributeValueCreate) SetNillableUpdatedAt(v *time.Time) *UserAttributeValueCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *UserAttributeValueCreate) SetUserID(v int64) *UserAttributeValueCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetAttributeID sets the "attribute_id" field. +func (_c *UserAttributeValueCreate) SetAttributeID(v int64) *UserAttributeValueCreate { + _c.mutation.SetAttributeID(v) + return _c +} + +// SetValue sets the "value" field. +func (_c *UserAttributeValueCreate) SetValue(v string) *UserAttributeValueCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_c *UserAttributeValueCreate) SetNillableValue(v *string) *UserAttributeValueCreate { + if v != nil { + _c.SetValue(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UserAttributeValueCreate) SetUser(v *User) *UserAttributeValueCreate { + return _c.SetUserID(v.ID) +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID. +func (_c *UserAttributeValueCreate) SetDefinitionID(id int64) *UserAttributeValueCreate { + _c.mutation.SetDefinitionID(id) + return _c +} + +// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity. +func (_c *UserAttributeValueCreate) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueCreate { + return _c.SetDefinitionID(v.ID) +} + +// Mutation returns the UserAttributeValueMutation object of the builder. +func (_c *UserAttributeValueCreate) Mutation() *UserAttributeValueMutation { + return _c.mutation +} + +// Save creates the UserAttributeValue in the database. +func (_c *UserAttributeValueCreate) Save(ctx context.Context) (*UserAttributeValue, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAttributeValueCreate) SaveX(ctx context.Context) *UserAttributeValue { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeValueCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeValueCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAttributeValueCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := userattributevalue.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := userattributevalue.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Value(); !ok { + v := userattributevalue.DefaultValue + _c.mutation.SetValue(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UserAttributeValueCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAttributeValue.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserAttributeValue.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserAttributeValue.user_id"`)} + } + if _, ok := _c.mutation.AttributeID(); !ok { + return &ValidationError{Name: "attribute_id", err: errors.New(`ent: missing required field "UserAttributeValue.attribute_id"`)} + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "UserAttributeValue.value"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserAttributeValue.user"`)} + } + if len(_c.mutation.DefinitionIDs()) == 0 { + return &ValidationError{Name: "definition", err: errors.New(`ent: missing required edge "UserAttributeValue.definition"`)} + } + return nil +} + +func (_c *UserAttributeValueCreate) sqlSave(ctx context.Context) (*UserAttributeValue, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserAttributeValueCreate) createSpec() (*UserAttributeValue, *sqlgraph.CreateSpec) { + var ( + _node = &UserAttributeValue{config: _c.config} + _spec = sqlgraph.NewCreateSpec(userattributevalue.Table, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(userattributevalue.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + _node.Value = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AttributeID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAttributeValue.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAttributeValueUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserAttributeValueCreate) OnConflict(opts ...sql.ConflictOption) *UserAttributeValueUpsertOne { + _c.conflict = opts + return &UserAttributeValueUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAttributeValueCreate) OnConflictColumns(columns ...string) *UserAttributeValueUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeValueUpsertOne{ + create: _c, + } +} + +type ( + // UserAttributeValueUpsertOne is the builder for "upsert"-ing + // one UserAttributeValue node. + UserAttributeValueUpsertOne struct { + create *UserAttributeValueCreate + } + + // UserAttributeValueUpsert is the "OnConflict" setter. + UserAttributeValueUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeValueUpsert) SetUpdatedAt(v time.Time) *UserAttributeValueUpsert { + u.Set(userattributevalue.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeValueUpsert) UpdateUpdatedAt() *UserAttributeValueUpsert { + u.SetExcluded(userattributevalue.FieldUpdatedAt) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UserAttributeValueUpsert) SetUserID(v int64) *UserAttributeValueUpsert { + u.Set(userattributevalue.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsert) UpdateUserID() *UserAttributeValueUpsert { + u.SetExcluded(userattributevalue.FieldUserID) + return u +} + +// SetAttributeID sets the "attribute_id" field. +func (u *UserAttributeValueUpsert) SetAttributeID(v int64) *UserAttributeValueUpsert { + u.Set(userattributevalue.FieldAttributeID, v) + return u +} + +// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsert) UpdateAttributeID() *UserAttributeValueUpsert { + u.SetExcluded(userattributevalue.FieldAttributeID) + return u +} + +// SetValue sets the "value" field. +func (u *UserAttributeValueUpsert) SetValue(v string) *UserAttributeValueUpsert { + u.Set(userattributevalue.FieldValue, v) + return u +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *UserAttributeValueUpsert) UpdateValue() *UserAttributeValueUpsert { + u.SetExcluded(userattributevalue.FieldValue) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeValueUpsertOne) UpdateNewValues() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(userattributevalue.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeValueUpsertOne) Ignore() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeValueUpsertOne) DoNothing() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeValueCreate.OnConflict +// documentation for more info. +func (u *UserAttributeValueUpsertOne) Update(set func(*UserAttributeValueUpsert)) *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeValueUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeValueUpsertOne) SetUpdatedAt(v time.Time) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateUpdatedAt() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *UserAttributeValueUpsertOne) SetUserID(v int64) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateUserID() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUserID() + }) +} + +// SetAttributeID sets the "attribute_id" field. +func (u *UserAttributeValueUpsertOne) SetAttributeID(v int64) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetAttributeID(v) + }) +} + +// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateAttributeID() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateAttributeID() + }) +} + +// SetValue sets the "value" field. +func (u *UserAttributeValueUpsertOne) SetValue(v string) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateValue() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateValue() + }) +} + +// Exec executes the query. +func (u *UserAttributeValueUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAttributeValueCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAttributeValueUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserAttributeValueUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserAttributeValueUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserAttributeValueCreateBulk is the builder for creating many UserAttributeValue entities in bulk. +type UserAttributeValueCreateBulk struct { + config + err error + builders []*UserAttributeValueCreate + conflict []sql.ConflictOption +} + +// Save creates the UserAttributeValue entities in the database. +func (_c *UserAttributeValueCreateBulk) Save(ctx context.Context) ([]*UserAttributeValue, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserAttributeValue, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserAttributeValueMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserAttributeValueCreateBulk) SaveX(ctx context.Context) []*UserAttributeValue { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeValueCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeValueCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAttributeValue.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAttributeValueUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserAttributeValueCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAttributeValueUpsertBulk { + _c.conflict = opts + return &UserAttributeValueUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAttributeValueCreateBulk) OnConflictColumns(columns ...string) *UserAttributeValueUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeValueUpsertBulk{ + create: _c, + } +} + +// UserAttributeValueUpsertBulk is the builder for "upsert"-ing +// a bulk of UserAttributeValue nodes. +type UserAttributeValueUpsertBulk struct { + create *UserAttributeValueCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeValueUpsertBulk) UpdateNewValues() *UserAttributeValueUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(userattributevalue.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeValueUpsertBulk) Ignore() *UserAttributeValueUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeValueUpsertBulk) DoNothing() *UserAttributeValueUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeValueCreateBulk.OnConflict +// documentation for more info. +func (u *UserAttributeValueUpsertBulk) Update(set func(*UserAttributeValueUpsert)) *UserAttributeValueUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeValueUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeValueUpsertBulk) SetUpdatedAt(v time.Time) *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeValueUpsertBulk) UpdateUpdatedAt() *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *UserAttributeValueUpsertBulk) SetUserID(v int64) *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertBulk) UpdateUserID() *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUserID() + }) +} + +// SetAttributeID sets the "attribute_id" field. +func (u *UserAttributeValueUpsertBulk) SetAttributeID(v int64) *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetAttributeID(v) + }) +} + +// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertBulk) UpdateAttributeID() *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateAttributeID() + }) +} + +// SetValue sets the "value" field. +func (u *UserAttributeValueUpsertBulk) SetValue(v string) *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *UserAttributeValueUpsertBulk) UpdateValue() *UserAttributeValueUpsertBulk { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateValue() + }) +} + +// Exec executes the query. +func (u *UserAttributeValueUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAttributeValueCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAttributeValueCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAttributeValueUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userattributevalue_delete.go b/backend/ent/userattributevalue_delete.go new file mode 100644 index 00000000..2805e49f --- /dev/null +++ b/backend/ent/userattributevalue_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValueDelete is the builder for deleting a UserAttributeValue entity. +type UserAttributeValueDelete struct { + config + hooks []Hook + mutation *UserAttributeValueMutation +} + +// Where appends a list predicates to the UserAttributeValueDelete builder. +func (_d *UserAttributeValueDelete) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UserAttributeValueDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserAttributeValueDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserAttributeValueDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(userattributevalue.Table, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserAttributeValueDeleteOne is the builder for deleting a single UserAttributeValue entity. +type UserAttributeValueDeleteOne struct { + _d *UserAttributeValueDelete +} + +// Where appends a list predicates to the UserAttributeValueDelete builder. +func (_d *UserAttributeValueDeleteOne) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserAttributeValueDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{userattributevalue.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserAttributeValueDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userattributevalue_query.go b/backend/ent/userattributevalue_query.go new file mode 100644 index 00000000..babfc9a9 --- /dev/null +++ b/backend/ent/userattributevalue_query.go @@ -0,0 +1,681 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValueQuery is the builder for querying UserAttributeValue entities. +type UserAttributeValueQuery struct { + config + ctx *QueryContext + order []userattributevalue.OrderOption + inters []Interceptor + predicates []predicate.UserAttributeValue + withUser *UserQuery + withDefinition *UserAttributeDefinitionQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserAttributeValueQuery builder. +func (_q *UserAttributeValueQuery) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserAttributeValueQuery) Limit(limit int) *UserAttributeValueQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserAttributeValueQuery) Offset(offset int) *UserAttributeValueQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserAttributeValueQuery) Unique(unique bool) *UserAttributeValueQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserAttributeValueQuery) Order(o ...userattributevalue.OrderOption) *UserAttributeValueQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UserAttributeValueQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.UserTable, userattributevalue.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDefinition chains the current query on the "definition" edge. +func (_q *UserAttributeValueQuery) QueryDefinition() *UserAttributeDefinitionQuery { + query := (&UserAttributeDefinitionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, selector), + sqlgraph.To(userattributedefinition.Table, userattributedefinition.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.DefinitionTable, userattributevalue.DefinitionColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAttributeValue entity from the query. +// Returns a *NotFoundError when no UserAttributeValue was found. +func (_q *UserAttributeValueQuery) First(ctx context.Context) (*UserAttributeValue, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userattributevalue.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserAttributeValueQuery) FirstX(ctx context.Context) *UserAttributeValue { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserAttributeValue ID from the query. +// Returns a *NotFoundError when no UserAttributeValue ID was found. +func (_q *UserAttributeValueQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{userattributevalue.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserAttributeValueQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserAttributeValue entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserAttributeValue entity is found. +// Returns a *NotFoundError when no UserAttributeValue entities are found. +func (_q *UserAttributeValueQuery) Only(ctx context.Context) (*UserAttributeValue, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userattributevalue.Label} + default: + return nil, &NotSingularError{userattributevalue.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAttributeValueQuery) OnlyX(ctx context.Context) *UserAttributeValue { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserAttributeValue ID in the query. +// Returns a *NotSingularError when more than one UserAttributeValue ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserAttributeValueQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{userattributevalue.Label} + default: + err = &NotSingularError{userattributevalue.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserAttributeValueQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserAttributeValues. +func (_q *UserAttributeValueQuery) All(ctx context.Context) ([]*UserAttributeValue, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAttributeValue, *UserAttributeValueQuery]() + return withInterceptors[[]*UserAttributeValue](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAttributeValueQuery) AllX(ctx context.Context) []*UserAttributeValue { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserAttributeValue IDs. +func (_q *UserAttributeValueQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(userattributevalue.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserAttributeValueQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserAttributeValueQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAttributeValueQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserAttributeValueQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserAttributeValueQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserAttributeValueQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserAttributeValueQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserAttributeValueQuery) Clone() *UserAttributeValueQuery { + if _q == nil { + return nil + } + return &UserAttributeValueQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]userattributevalue.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserAttributeValue{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withDefinition: _q.withDefinition.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAttributeValueQuery) WithUser(opts ...func(*UserQuery)) *UserAttributeValueQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithDefinition tells the query-builder to eager-load the nodes that are connected to +// the "definition" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAttributeValueQuery) WithDefinition(opts ...func(*UserAttributeDefinitionQuery)) *UserAttributeValueQuery { + query := (&UserAttributeDefinitionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withDefinition = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserAttributeValue.Query(). +// GroupBy(userattributevalue.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserAttributeValueQuery) GroupBy(field string, fields ...string) *UserAttributeValueGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserAttributeValueGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = userattributevalue.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UserAttributeValue.Query(). +// Select(userattributevalue.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UserAttributeValueQuery) Select(fields ...string) *UserAttributeValueSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserAttributeValueSelect{UserAttributeValueQuery: _q} + sbuild.label = userattributevalue.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserAttributeValueSelect configured with the given aggregations. +func (_q *UserAttributeValueQuery) Aggregate(fns ...AggregateFunc) *UserAttributeValueSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAttributeValueQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userattributevalue.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAttributeValueQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAttributeValue, error) { + var ( + nodes = []*UserAttributeValue{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withDefinition != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAttributeValue).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAttributeValue{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserAttributeValue, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withDefinition; query != nil { + if err := _q.loadDefinition(ctx, query, nodes, nil, + func(n *UserAttributeValue, e *UserAttributeDefinition) { n.Edges.Definition = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAttributeValueQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserAttributeValue, init func(*UserAttributeValue), assign func(*UserAttributeValue, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAttributeValue) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserAttributeValueQuery) loadDefinition(ctx context.Context, query *UserAttributeDefinitionQuery, nodes []*UserAttributeValue, init func(*UserAttributeValue), assign func(*UserAttributeValue, *UserAttributeDefinition)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAttributeValue) + for i := range nodes { + fk := nodes[i].AttributeID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(userattributedefinition.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "attribute_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *UserAttributeValueQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserAttributeValueQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributevalue.FieldID) + for i := range fields { + if fields[i] != userattributevalue.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(userattributevalue.FieldUserID) + } + if _q.withDefinition != nil { + _spec.Node.AddColumnOnce(userattributevalue.FieldAttributeID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(userattributevalue.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = userattributevalue.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserAttributeValueGroupBy is the group-by builder for UserAttributeValue entities. +type UserAttributeValueGroupBy struct { + selector + build *UserAttributeValueQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserAttributeValueGroupBy) Aggregate(fns ...AggregateFunc) *UserAttributeValueGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserAttributeValueGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeValueQuery, *UserAttributeValueGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAttributeValueGroupBy) sqlScan(ctx context.Context, root *UserAttributeValueQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAttributeValueSelect is the builder for selecting fields of UserAttributeValue entities. +type UserAttributeValueSelect struct { + *UserAttributeValueQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAttributeValueSelect) Aggregate(fns ...AggregateFunc) *UserAttributeValueSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserAttributeValueSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeValueQuery, *UserAttributeValueSelect](ctx, _s.UserAttributeValueQuery, _s, _s.inters, v) +} + +func (_s *UserAttributeValueSelect) sqlScan(ctx context.Context, root *UserAttributeValueQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/userattributevalue_update.go b/backend/ent/userattributevalue_update.go new file mode 100644 index 00000000..7dfce024 --- /dev/null +++ b/backend/ent/userattributevalue_update.go @@ -0,0 +1,504 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValueUpdate is the builder for updating UserAttributeValue entities. +type UserAttributeValueUpdate struct { + config + hooks []Hook + mutation *UserAttributeValueMutation +} + +// Where appends a list predicates to the UserAttributeValueUpdate builder. +func (_u *UserAttributeValueUpdate) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserAttributeValueUpdate) SetUpdatedAt(v time.Time) *UserAttributeValueUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserAttributeValueUpdate) SetUserID(v int64) *UserAttributeValueUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAttributeValueUpdate) SetNillableUserID(v *int64) *UserAttributeValueUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetAttributeID sets the "attribute_id" field. +func (_u *UserAttributeValueUpdate) SetAttributeID(v int64) *UserAttributeValueUpdate { + _u.mutation.SetAttributeID(v) + return _u +} + +// SetNillableAttributeID sets the "attribute_id" field if the given value is not nil. +func (_u *UserAttributeValueUpdate) SetNillableAttributeID(v *int64) *UserAttributeValueUpdate { + if v != nil { + _u.SetAttributeID(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *UserAttributeValueUpdate) SetValue(v string) *UserAttributeValueUpdate { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *UserAttributeValueUpdate) SetNillableValue(v *string) *UserAttributeValueUpdate { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAttributeValueUpdate) SetUser(v *User) *UserAttributeValueUpdate { + return _u.SetUserID(v.ID) +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID. +func (_u *UserAttributeValueUpdate) SetDefinitionID(id int64) *UserAttributeValueUpdate { + _u.mutation.SetDefinitionID(id) + return _u +} + +// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdate) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueUpdate { + return _u.SetDefinitionID(v.ID) +} + +// Mutation returns the UserAttributeValueMutation object of the builder. +func (_u *UserAttributeValueUpdate) Mutation() *UserAttributeValueMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAttributeValueUpdate) ClearUser() *UserAttributeValueUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdate) ClearDefinition() *UserAttributeValueUpdate { + _u.mutation.ClearDefinition() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserAttributeValueUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeValueUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAttributeValueUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeValueUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeValueUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := userattributevalue.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeValueUpdate) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.user"`) + } + if _u.mutation.DefinitionCleared() && len(_u.mutation.DefinitionIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.definition"`) + } + return nil +} + +func (_u *UserAttributeValueUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.DefinitionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributevalue.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAttributeValueUpdateOne is the builder for updating a single UserAttributeValue entity. +type UserAttributeValueUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAttributeValueMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserAttributeValueUpdateOne) SetUpdatedAt(v time.Time) *UserAttributeValueUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserAttributeValueUpdateOne) SetUserID(v int64) *UserAttributeValueUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAttributeValueUpdateOne) SetNillableUserID(v *int64) *UserAttributeValueUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetAttributeID sets the "attribute_id" field. +func (_u *UserAttributeValueUpdateOne) SetAttributeID(v int64) *UserAttributeValueUpdateOne { + _u.mutation.SetAttributeID(v) + return _u +} + +// SetNillableAttributeID sets the "attribute_id" field if the given value is not nil. +func (_u *UserAttributeValueUpdateOne) SetNillableAttributeID(v *int64) *UserAttributeValueUpdateOne { + if v != nil { + _u.SetAttributeID(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *UserAttributeValueUpdateOne) SetValue(v string) *UserAttributeValueUpdateOne { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *UserAttributeValueUpdateOne) SetNillableValue(v *string) *UserAttributeValueUpdateOne { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAttributeValueUpdateOne) SetUser(v *User) *UserAttributeValueUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID. +func (_u *UserAttributeValueUpdateOne) SetDefinitionID(id int64) *UserAttributeValueUpdateOne { + _u.mutation.SetDefinitionID(id) + return _u +} + +// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdateOne) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueUpdateOne { + return _u.SetDefinitionID(v.ID) +} + +// Mutation returns the UserAttributeValueMutation object of the builder. +func (_u *UserAttributeValueUpdateOne) Mutation() *UserAttributeValueMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAttributeValueUpdateOne) ClearUser() *UserAttributeValueUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdateOne) ClearDefinition() *UserAttributeValueUpdateOne { + _u.mutation.ClearDefinition() + return _u +} + +// Where appends a list predicates to the UserAttributeValueUpdate builder. +func (_u *UserAttributeValueUpdateOne) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserAttributeValueUpdateOne) Select(field string, fields ...string) *UserAttributeValueUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserAttributeValue entity. +func (_u *UserAttributeValueUpdateOne) Save(ctx context.Context) (*UserAttributeValue, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeValueUpdateOne) SaveX(ctx context.Context) *UserAttributeValue { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAttributeValueUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeValueUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeValueUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := userattributevalue.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeValueUpdateOne) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.user"`) + } + if _u.mutation.DefinitionCleared() && len(_u.mutation.DefinitionIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.definition"`) + } + return nil +} + +func (_u *UserAttributeValueUpdateOne) sqlSave(ctx context.Context) (_node *UserAttributeValue, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserAttributeValue.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributevalue.FieldID) + for _, f := range fields { + if !userattributevalue.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != userattributevalue.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.DefinitionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserAttributeValue{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributevalue.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/internal/handler/admin/user_attribute_handler.go b/backend/internal/handler/admin/user_attribute_handler.go new file mode 100644 index 00000000..2f326279 --- /dev/null +++ b/backend/internal/handler/admin/user_attribute_handler.go @@ -0,0 +1,342 @@ +package admin + +import ( + "strconv" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// UserAttributeHandler handles user attribute management +type UserAttributeHandler struct { + attrService *service.UserAttributeService +} + +// NewUserAttributeHandler creates a new handler +func NewUserAttributeHandler(attrService *service.UserAttributeService) *UserAttributeHandler { + return &UserAttributeHandler{attrService: attrService} +} + +// --- Request/Response DTOs --- + +// CreateAttributeDefinitionRequest represents create attribute definition request +type CreateAttributeDefinitionRequest struct { + Key string `json:"key" binding:"required,min=1,max=100"` + Name string `json:"name" binding:"required,min=1,max=255"` + Description string `json:"description"` + Type string `json:"type" binding:"required"` + Options []service.UserAttributeOption `json:"options"` + Required bool `json:"required"` + Validation service.UserAttributeValidation `json:"validation"` + Placeholder string `json:"placeholder"` + Enabled bool `json:"enabled"` +} + +// UpdateAttributeDefinitionRequest represents update attribute definition request +type UpdateAttributeDefinitionRequest struct { + Name *string `json:"name"` + Description *string `json:"description"` + Type *string `json:"type"` + Options *[]service.UserAttributeOption `json:"options"` + Required *bool `json:"required"` + Validation *service.UserAttributeValidation `json:"validation"` + Placeholder *string `json:"placeholder"` + Enabled *bool `json:"enabled"` +} + +// ReorderRequest represents reorder attribute definitions request +type ReorderRequest struct { + IDs []int64 `json:"ids" binding:"required"` +} + +// UpdateUserAttributesRequest represents update user attributes request +type UpdateUserAttributesRequest struct { + Values map[int64]string `json:"values" binding:"required"` +} + +// BatchGetUserAttributesRequest represents batch get user attributes request +type BatchGetUserAttributesRequest struct { + UserIDs []int64 `json:"user_ids" binding:"required"` +} + +// BatchUserAttributesResponse represents batch user attributes response +type BatchUserAttributesResponse struct { + // Map of userID -> map of attributeID -> value + Attributes map[int64]map[int64]string `json:"attributes"` +} + +// AttributeDefinitionResponse represents attribute definition response +type AttributeDefinitionResponse struct { + ID int64 `json:"id"` + Key string `json:"key"` + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Options []service.UserAttributeOption `json:"options"` + Required bool `json:"required"` + Validation service.UserAttributeValidation `json:"validation"` + Placeholder string `json:"placeholder"` + DisplayOrder int `json:"display_order"` + Enabled bool `json:"enabled"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// AttributeValueResponse represents attribute value response +type AttributeValueResponse struct { + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + AttributeID int64 `json:"attribute_id"` + Value string `json:"value"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// --- Helpers --- + +func defToResponse(def *service.UserAttributeDefinition) *AttributeDefinitionResponse { + return &AttributeDefinitionResponse{ + ID: def.ID, + Key: def.Key, + Name: def.Name, + Description: def.Description, + Type: string(def.Type), + Options: def.Options, + Required: def.Required, + Validation: def.Validation, + Placeholder: def.Placeholder, + DisplayOrder: def.DisplayOrder, + Enabled: def.Enabled, + CreatedAt: def.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + UpdatedAt: def.UpdatedAt.Format("2006-01-02T15:04:05Z07:00"), + } +} + +func valueToResponse(val *service.UserAttributeValue) *AttributeValueResponse { + return &AttributeValueResponse{ + ID: val.ID, + UserID: val.UserID, + AttributeID: val.AttributeID, + Value: val.Value, + CreatedAt: val.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + UpdatedAt: val.UpdatedAt.Format("2006-01-02T15:04:05Z07:00"), + } +} + +// --- Handlers --- + +// ListDefinitions lists all attribute definitions +// GET /admin/user-attributes +func (h *UserAttributeHandler) ListDefinitions(c *gin.Context) { + enabledOnly := c.Query("enabled") == "true" + + defs, err := h.attrService.ListDefinitions(c.Request.Context(), enabledOnly) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeDefinitionResponse, 0, len(defs)) + for i := range defs { + out = append(out, defToResponse(&defs[i])) + } + + response.Success(c, out) +} + +// CreateDefinition creates a new attribute definition +// POST /admin/user-attributes +func (h *UserAttributeHandler) CreateDefinition(c *gin.Context) { + var req CreateAttributeDefinitionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + def, err := h.attrService.CreateDefinition(c.Request.Context(), service.CreateAttributeDefinitionInput{ + Key: req.Key, + Name: req.Name, + Description: req.Description, + Type: service.UserAttributeType(req.Type), + Options: req.Options, + Required: req.Required, + Validation: req.Validation, + Placeholder: req.Placeholder, + Enabled: req.Enabled, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, defToResponse(def)) +} + +// UpdateDefinition updates an attribute definition +// PUT /admin/user-attributes/:id +func (h *UserAttributeHandler) UpdateDefinition(c *gin.Context) { + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid attribute ID") + return + } + + var req UpdateAttributeDefinitionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + input := service.UpdateAttributeDefinitionInput{ + Name: req.Name, + Description: req.Description, + Options: req.Options, + Required: req.Required, + Validation: req.Validation, + Placeholder: req.Placeholder, + Enabled: req.Enabled, + } + if req.Type != nil { + t := service.UserAttributeType(*req.Type) + input.Type = &t + } + + def, err := h.attrService.UpdateDefinition(c.Request.Context(), id, input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, defToResponse(def)) +} + +// DeleteDefinition deletes an attribute definition +// DELETE /admin/user-attributes/:id +func (h *UserAttributeHandler) DeleteDefinition(c *gin.Context) { + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid attribute ID") + return + } + + if err := h.attrService.DeleteDefinition(c.Request.Context(), id); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Attribute definition deleted successfully"}) +} + +// ReorderDefinitions reorders attribute definitions +// PUT /admin/user-attributes/reorder +func (h *UserAttributeHandler) ReorderDefinitions(c *gin.Context) { + var req ReorderRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Convert IDs array to orders map (position in array = display_order) + orders := make(map[int64]int, len(req.IDs)) + for i, id := range req.IDs { + orders[id] = i + } + + if err := h.attrService.ReorderDefinitions(c.Request.Context(), orders); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Reorder successful"}) +} + +// GetUserAttributes gets a user's attribute values +// GET /admin/users/:id/attributes +func (h *UserAttributeHandler) GetUserAttributes(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + values, err := h.attrService.GetUserAttributes(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeValueResponse, 0, len(values)) + for i := range values { + out = append(out, valueToResponse(&values[i])) + } + + response.Success(c, out) +} + +// UpdateUserAttributes updates a user's attribute values +// PUT /admin/users/:id/attributes +func (h *UserAttributeHandler) UpdateUserAttributes(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + var req UpdateUserAttributesRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + inputs := make([]service.UpdateUserAttributeInput, 0, len(req.Values)) + for attrID, value := range req.Values { + inputs = append(inputs, service.UpdateUserAttributeInput{ + AttributeID: attrID, + Value: value, + }) + } + + if err := h.attrService.UpdateUserAttributes(c.Request.Context(), userID, inputs); err != nil { + response.ErrorFrom(c, err) + return + } + + // Return updated values + values, err := h.attrService.GetUserAttributes(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeValueResponse, 0, len(values)) + for i := range values { + out = append(out, valueToResponse(&values[i])) + } + + response.Success(c, out) +} + +// GetBatchUserAttributes gets attribute values for multiple users +// POST /admin/user-attributes/batch +func (h *UserAttributeHandler) GetBatchUserAttributes(c *gin.Context) { + var req BatchGetUserAttributesRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.UserIDs) == 0 { + response.Success(c, BatchUserAttributesResponse{Attributes: map[int64]map[int64]string{}}) + return + } + + attrs, err := h.attrService.GetBatchUserAttributes(c.Request.Context(), req.UserIDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, BatchUserAttributesResponse{Attributes: attrs}) +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 85105a30..817b71d3 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -20,6 +20,7 @@ type AdminHandlers struct { System *admin.SystemHandler Subscription *admin.SubscriptionHandler Usage *admin.UsageHandler + UserAttribute *admin.UserAttributeHandler } // Handlers contains all HTTP handlers diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index fc9f1642..1695f8a9 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -23,6 +23,7 @@ func ProvideAdminHandlers( systemHandler *admin.SystemHandler, subscriptionHandler *admin.SubscriptionHandler, usageHandler *admin.UsageHandler, + userAttributeHandler *admin.UserAttributeHandler, ) *AdminHandlers { return &AdminHandlers{ Dashboard: dashboardHandler, @@ -39,6 +40,7 @@ func ProvideAdminHandlers( System: systemHandler, Subscription: subscriptionHandler, Usage: usageHandler, + UserAttribute: userAttributeHandler, } } @@ -107,6 +109,7 @@ var ProviderSet = wire.NewSet( ProvideSystemHandler, admin.NewSubscriptionHandler, admin.NewUsageHandler, + admin.NewUserAttributeHandler, // AdminHandlers and Handlers constructors ProvideAdminHandlers, diff --git a/backend/internal/repository/user_attribute_repo.go b/backend/internal/repository/user_attribute_repo.go new file mode 100644 index 00000000..504aa91e --- /dev/null +++ b/backend/internal/repository/user_attribute_repo.go @@ -0,0 +1,387 @@ +package repository + +import ( + "context" + "database/sql" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// UserAttributeDefinitionRepository implementation +type userAttributeDefinitionRepository struct { + client *dbent.Client +} + +// NewUserAttributeDefinitionRepository creates a new repository instance +func NewUserAttributeDefinitionRepository(client *dbent.Client) service.UserAttributeDefinitionRepository { + return &userAttributeDefinitionRepository{client: client} +} + +func (r *userAttributeDefinitionRepository) Create(ctx context.Context, def *service.UserAttributeDefinition) error { + client := clientFromContext(ctx, r.client) + + created, err := client.UserAttributeDefinition.Create(). + SetKey(def.Key). + SetName(def.Name). + SetDescription(def.Description). + SetType(string(def.Type)). + SetOptions(toEntOptions(def.Options)). + SetRequired(def.Required). + SetValidation(toEntValidation(def.Validation)). + SetPlaceholder(def.Placeholder). + SetEnabled(def.Enabled). + Save(ctx) + + if err != nil { + return translatePersistenceError(err, nil, service.ErrAttributeKeyExists) + } + + def.ID = created.ID + def.DisplayOrder = created.DisplayOrder + def.CreatedAt = created.CreatedAt + def.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *userAttributeDefinitionRepository) GetByID(ctx context.Context, id int64) (*service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + e, err := client.UserAttributeDefinition.Query(). + Where(userattributedefinition.IDEQ(id)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + return defEntityToService(e), nil +} + +func (r *userAttributeDefinitionRepository) GetByKey(ctx context.Context, key string) (*service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + e, err := client.UserAttributeDefinition.Query(). + Where(userattributedefinition.KeyEQ(key)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + return defEntityToService(e), nil +} + +func (r *userAttributeDefinitionRepository) Update(ctx context.Context, def *service.UserAttributeDefinition) error { + client := clientFromContext(ctx, r.client) + + updated, err := client.UserAttributeDefinition.UpdateOneID(def.ID). + SetName(def.Name). + SetDescription(def.Description). + SetType(string(def.Type)). + SetOptions(toEntOptions(def.Options)). + SetRequired(def.Required). + SetValidation(toEntValidation(def.Validation)). + SetPlaceholder(def.Placeholder). + SetDisplayOrder(def.DisplayOrder). + SetEnabled(def.Enabled). + Save(ctx) + + if err != nil { + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, service.ErrAttributeKeyExists) + } + + def.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *userAttributeDefinitionRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeDefinition.Delete(). + Where(userattributedefinition.IDEQ(id)). + Exec(ctx) + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) +} + +func (r *userAttributeDefinitionRepository) List(ctx context.Context, enabledOnly bool) ([]service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + q := client.UserAttributeDefinition.Query() + if enabledOnly { + q = q.Where(userattributedefinition.EnabledEQ(true)) + } + + entities, err := q.Order(dbent.Asc(userattributedefinition.FieldDisplayOrder)).All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeDefinition, 0, len(entities)) + for _, e := range entities { + result = append(result, *defEntityToService(e)) + } + return result, nil +} + +func (r *userAttributeDefinitionRepository) UpdateDisplayOrders(ctx context.Context, orders map[int64]int) error { + tx, err := r.client.Tx(ctx) + if err != nil { + return err + } + defer func() { _ = tx.Rollback() }() + + for id, order := range orders { + if _, err := tx.UserAttributeDefinition.UpdateOneID(id). + SetDisplayOrder(order). + Save(ctx); err != nil { + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + } + + return tx.Commit() +} + +func (r *userAttributeDefinitionRepository) ExistsByKey(ctx context.Context, key string) (bool, error) { + client := clientFromContext(ctx, r.client) + return client.UserAttributeDefinition.Query(). + Where(userattributedefinition.KeyEQ(key)). + Exist(ctx) +} + +// UserAttributeValueRepository implementation +type userAttributeValueRepository struct { + client *dbent.Client + sql *sql.DB +} + +// NewUserAttributeValueRepository creates a new repository instance +func NewUserAttributeValueRepository(client *dbent.Client, sqlDB *sql.DB) service.UserAttributeValueRepository { + return &userAttributeValueRepository{client: client, sql: sqlDB} +} + +func (r *userAttributeValueRepository) GetByUserID(ctx context.Context, userID int64) ([]service.UserAttributeValue, error) { + client := clientFromContext(ctx, r.client) + + entities, err := client.UserAttributeValue.Query(). + Where(userattributevalue.UserIDEQ(userID)). + All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeValue, 0, len(entities)) + for _, e := range entities { + result = append(result, service.UserAttributeValue{ + ID: e.ID, + UserID: e.UserID, + AttributeID: e.AttributeID, + Value: e.Value, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + }) + } + return result, nil +} + +func (r *userAttributeValueRepository) GetByUserIDs(ctx context.Context, userIDs []int64) ([]service.UserAttributeValue, error) { + if len(userIDs) == 0 { + return []service.UserAttributeValue{}, nil + } + + client := clientFromContext(ctx, r.client) + + entities, err := client.UserAttributeValue.Query(). + Where(userattributevalue.UserIDIn(userIDs...)). + All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeValue, 0, len(entities)) + for _, e := range entities { + result = append(result, service.UserAttributeValue{ + ID: e.ID, + UserID: e.UserID, + AttributeID: e.AttributeID, + Value: e.Value, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + }) + } + return result, nil +} + +func (r *userAttributeValueRepository) UpsertBatch(ctx context.Context, userID int64, inputs []service.UpdateUserAttributeInput) error { + if len(inputs) == 0 { + return nil + } + + tx, err := r.client.Tx(ctx) + if err != nil { + return err + } + defer func() { _ = tx.Rollback() }() + + for _, input := range inputs { + // Use upsert (ON CONFLICT DO UPDATE) + err := tx.UserAttributeValue.Create(). + SetUserID(userID). + SetAttributeID(input.AttributeID). + SetValue(input.Value). + OnConflictColumns(userattributevalue.FieldUserID, userattributevalue.FieldAttributeID). + UpdateValue(). + UpdateUpdatedAt(). + Exec(ctx) + if err != nil { + return err + } + } + + return tx.Commit() +} + +func (r *userAttributeValueRepository) DeleteByAttributeID(ctx context.Context, attributeID int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeValue.Delete(). + Where(userattributevalue.AttributeIDEQ(attributeID)). + Exec(ctx) + return err +} + +func (r *userAttributeValueRepository) DeleteByUserID(ctx context.Context, userID int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeValue.Delete(). + Where(userattributevalue.UserIDEQ(userID)). + Exec(ctx) + return err +} + +// Helper functions for entity to service conversion +func defEntityToService(e *dbent.UserAttributeDefinition) *service.UserAttributeDefinition { + if e == nil { + return nil + } + return &service.UserAttributeDefinition{ + ID: e.ID, + Key: e.Key, + Name: e.Name, + Description: e.Description, + Type: service.UserAttributeType(e.Type), + Options: toServiceOptions(e.Options), + Required: e.Required, + Validation: toServiceValidation(e.Validation), + Placeholder: e.Placeholder, + DisplayOrder: e.DisplayOrder, + Enabled: e.Enabled, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + } +} + +// Type conversion helpers (map types <-> service types) +func toEntOptions(opts []service.UserAttributeOption) []map[string]any { + if opts == nil { + return []map[string]any{} + } + result := make([]map[string]any, len(opts)) + for i, o := range opts { + result[i] = map[string]any{"value": o.Value, "label": o.Label} + } + return result +} + +func toServiceOptions(opts []map[string]any) []service.UserAttributeOption { + if opts == nil { + return []service.UserAttributeOption{} + } + result := make([]service.UserAttributeOption, len(opts)) + for i, o := range opts { + result[i] = service.UserAttributeOption{ + Value: getString(o, "value"), + Label: getString(o, "label"), + } + } + return result +} + +func toEntValidation(v service.UserAttributeValidation) map[string]any { + result := map[string]any{} + if v.MinLength != nil { + result["min_length"] = *v.MinLength + } + if v.MaxLength != nil { + result["max_length"] = *v.MaxLength + } + if v.Min != nil { + result["min"] = *v.Min + } + if v.Max != nil { + result["max"] = *v.Max + } + if v.Pattern != nil { + result["pattern"] = *v.Pattern + } + if v.Message != nil { + result["message"] = *v.Message + } + return result +} + +func toServiceValidation(v map[string]any) service.UserAttributeValidation { + result := service.UserAttributeValidation{} + if val := getInt(v, "min_length"); val != nil { + result.MinLength = val + } + if val := getInt(v, "max_length"); val != nil { + result.MaxLength = val + } + if val := getInt(v, "min"); val != nil { + result.Min = val + } + if val := getInt(v, "max"); val != nil { + result.Max = val + } + if val := getStringPtr(v, "pattern"); val != nil { + result.Pattern = val + } + if val := getStringPtr(v, "message"); val != nil { + result.Message = val + } + return result +} + +// Helper functions for type conversion +func getString(m map[string]any, key string) string { + if v, ok := m[key]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func getStringPtr(m map[string]any, key string) *string { + if v, ok := m[key]; ok { + if s, ok := v.(string); ok { + return &s + } + } + return nil +} + +func getInt(m map[string]any, key string) *int { + if v, ok := m[key]; ok { + switch n := v.(type) { + case int: + return &n + case int64: + i := int(n) + return &i + case float64: + i := int(n) + return &i + } + } + return nil +} diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index 0d579b23..c1852364 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -36,6 +36,8 @@ var ProviderSet = wire.NewSet( NewUsageLogRepository, NewSettingRepository, NewUserSubscriptionRepository, + NewUserAttributeDefinitionRepository, + NewUserAttributeValueRepository, // Cache implementations NewGatewayCache, diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 604d14df..74bc7469 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -54,6 +54,9 @@ func RegisterAdminRoutes( // 使用记录管理 registerUsageRoutes(admin, h) + + // 用户属性管理 + registerUserAttributeRoutes(admin, h) } } @@ -82,6 +85,10 @@ func registerUserManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) { users.POST("/:id/balance", h.Admin.User.UpdateBalance) users.GET("/:id/api-keys", h.Admin.User.GetUserAPIKeys) users.GET("/:id/usage", h.Admin.User.GetUserUsage) + + // User attribute values + users.GET("/:id/attributes", h.Admin.UserAttribute.GetUserAttributes) + users.PUT("/:id/attributes", h.Admin.UserAttribute.UpdateUserAttributes) } } @@ -242,3 +249,15 @@ func registerUsageRoutes(admin *gin.RouterGroup, h *handler.Handlers) { usage.GET("/search-api-keys", h.Admin.Usage.SearchApiKeys) } } + +func registerUserAttributeRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + attrs := admin.Group("/user-attributes") + { + attrs.GET("", h.Admin.UserAttribute.ListDefinitions) + attrs.POST("", h.Admin.UserAttribute.CreateDefinition) + attrs.POST("/batch", h.Admin.UserAttribute.GetBatchUserAttributes) + attrs.PUT("/reorder", h.Admin.UserAttribute.ReorderDefinitions) + attrs.PUT("/:id", h.Admin.UserAttribute.UpdateDefinition) + attrs.DELETE("/:id", h.Admin.UserAttribute.DeleteDefinition) + } +} diff --git a/backend/internal/service/user_attribute.go b/backend/internal/service/user_attribute.go new file mode 100644 index 00000000..0637102e --- /dev/null +++ b/backend/internal/service/user_attribute.go @@ -0,0 +1,125 @@ +package service + +import ( + "context" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// Error definitions for user attribute operations +var ( + ErrAttributeDefinitionNotFound = infraerrors.NotFound("ATTRIBUTE_DEFINITION_NOT_FOUND", "attribute definition not found") + ErrAttributeKeyExists = infraerrors.Conflict("ATTRIBUTE_KEY_EXISTS", "attribute key already exists") + ErrInvalidAttributeType = infraerrors.BadRequest("INVALID_ATTRIBUTE_TYPE", "invalid attribute type") + ErrAttributeValidationFailed = infraerrors.BadRequest("ATTRIBUTE_VALIDATION_FAILED", "attribute value validation failed") +) + +// UserAttributeType represents supported attribute types +type UserAttributeType string + +const ( + AttributeTypeText UserAttributeType = "text" + AttributeTypeTextarea UserAttributeType = "textarea" + AttributeTypeNumber UserAttributeType = "number" + AttributeTypeEmail UserAttributeType = "email" + AttributeTypeURL UserAttributeType = "url" + AttributeTypeDate UserAttributeType = "date" + AttributeTypeSelect UserAttributeType = "select" + AttributeTypeMultiSelect UserAttributeType = "multi_select" +) + +// UserAttributeOption represents a select option for select/multi_select types +type UserAttributeOption struct { + Value string `json:"value"` + Label string `json:"label"` +} + +// UserAttributeValidation represents validation rules for an attribute +type UserAttributeValidation struct { + MinLength *int `json:"min_length,omitempty"` + MaxLength *int `json:"max_length,omitempty"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` + Pattern *string `json:"pattern,omitempty"` + Message *string `json:"message,omitempty"` +} + +// UserAttributeDefinition represents a custom attribute definition +type UserAttributeDefinition struct { + ID int64 + Key string + Name string + Description string + Type UserAttributeType + Options []UserAttributeOption + Required bool + Validation UserAttributeValidation + Placeholder string + DisplayOrder int + Enabled bool + CreatedAt time.Time + UpdatedAt time.Time +} + +// UserAttributeValue represents a user's attribute value +type UserAttributeValue struct { + ID int64 + UserID int64 + AttributeID int64 + Value string + CreatedAt time.Time + UpdatedAt time.Time +} + +// CreateAttributeDefinitionInput for creating new definition +type CreateAttributeDefinitionInput struct { + Key string + Name string + Description string + Type UserAttributeType + Options []UserAttributeOption + Required bool + Validation UserAttributeValidation + Placeholder string + Enabled bool +} + +// UpdateAttributeDefinitionInput for updating definition +type UpdateAttributeDefinitionInput struct { + Name *string + Description *string + Type *UserAttributeType + Options *[]UserAttributeOption + Required *bool + Validation *UserAttributeValidation + Placeholder *string + Enabled *bool +} + +// UpdateUserAttributeInput for updating a single attribute value +type UpdateUserAttributeInput struct { + AttributeID int64 + Value string +} + +// UserAttributeDefinitionRepository interface for attribute definition persistence +type UserAttributeDefinitionRepository interface { + Create(ctx context.Context, def *UserAttributeDefinition) error + GetByID(ctx context.Context, id int64) (*UserAttributeDefinition, error) + GetByKey(ctx context.Context, key string) (*UserAttributeDefinition, error) + Update(ctx context.Context, def *UserAttributeDefinition) error + Delete(ctx context.Context, id int64) error + List(ctx context.Context, enabledOnly bool) ([]UserAttributeDefinition, error) + UpdateDisplayOrders(ctx context.Context, orders map[int64]int) error + ExistsByKey(ctx context.Context, key string) (bool, error) +} + +// UserAttributeValueRepository interface for user attribute value persistence +type UserAttributeValueRepository interface { + GetByUserID(ctx context.Context, userID int64) ([]UserAttributeValue, error) + GetByUserIDs(ctx context.Context, userIDs []int64) ([]UserAttributeValue, error) + UpsertBatch(ctx context.Context, userID int64, values []UpdateUserAttributeInput) error + DeleteByAttributeID(ctx context.Context, attributeID int64) error + DeleteByUserID(ctx context.Context, userID int64) error +} diff --git a/backend/internal/service/user_attribute_service.go b/backend/internal/service/user_attribute_service.go new file mode 100644 index 00000000..c27e29d0 --- /dev/null +++ b/backend/internal/service/user_attribute_service.go @@ -0,0 +1,295 @@ +package service + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// UserAttributeService handles attribute management +type UserAttributeService struct { + defRepo UserAttributeDefinitionRepository + valueRepo UserAttributeValueRepository +} + +// NewUserAttributeService creates a new service instance +func NewUserAttributeService( + defRepo UserAttributeDefinitionRepository, + valueRepo UserAttributeValueRepository, +) *UserAttributeService { + return &UserAttributeService{ + defRepo: defRepo, + valueRepo: valueRepo, + } +} + +// CreateDefinition creates a new attribute definition +func (s *UserAttributeService) CreateDefinition(ctx context.Context, input CreateAttributeDefinitionInput) (*UserAttributeDefinition, error) { + // Validate type + if !isValidAttributeType(input.Type) { + return nil, ErrInvalidAttributeType + } + + // Check if key exists + exists, err := s.defRepo.ExistsByKey(ctx, input.Key) + if err != nil { + return nil, fmt.Errorf("check key exists: %w", err) + } + if exists { + return nil, ErrAttributeKeyExists + } + + def := &UserAttributeDefinition{ + Key: input.Key, + Name: input.Name, + Description: input.Description, + Type: input.Type, + Options: input.Options, + Required: input.Required, + Validation: input.Validation, + Placeholder: input.Placeholder, + Enabled: input.Enabled, + } + + if err := s.defRepo.Create(ctx, def); err != nil { + return nil, fmt.Errorf("create definition: %w", err) + } + + return def, nil +} + +// GetDefinition retrieves a definition by ID +func (s *UserAttributeService) GetDefinition(ctx context.Context, id int64) (*UserAttributeDefinition, error) { + return s.defRepo.GetByID(ctx, id) +} + +// ListDefinitions lists all definitions +func (s *UserAttributeService) ListDefinitions(ctx context.Context, enabledOnly bool) ([]UserAttributeDefinition, error) { + return s.defRepo.List(ctx, enabledOnly) +} + +// UpdateDefinition updates an existing definition +func (s *UserAttributeService) UpdateDefinition(ctx context.Context, id int64, input UpdateAttributeDefinitionInput) (*UserAttributeDefinition, error) { + def, err := s.defRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Name != nil { + def.Name = *input.Name + } + if input.Description != nil { + def.Description = *input.Description + } + if input.Type != nil { + if !isValidAttributeType(*input.Type) { + return nil, ErrInvalidAttributeType + } + def.Type = *input.Type + } + if input.Options != nil { + def.Options = *input.Options + } + if input.Required != nil { + def.Required = *input.Required + } + if input.Validation != nil { + def.Validation = *input.Validation + } + if input.Placeholder != nil { + def.Placeholder = *input.Placeholder + } + if input.Enabled != nil { + def.Enabled = *input.Enabled + } + + if err := s.defRepo.Update(ctx, def); err != nil { + return nil, fmt.Errorf("update definition: %w", err) + } + + return def, nil +} + +// DeleteDefinition soft-deletes a definition and hard-deletes associated values +func (s *UserAttributeService) DeleteDefinition(ctx context.Context, id int64) error { + // Check if definition exists + _, err := s.defRepo.GetByID(ctx, id) + if err != nil { + return err + } + + // First delete all values (hard delete) + if err := s.valueRepo.DeleteByAttributeID(ctx, id); err != nil { + return fmt.Errorf("delete values: %w", err) + } + + // Then soft-delete the definition + if err := s.defRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete definition: %w", err) + } + + return nil +} + +// ReorderDefinitions updates display order for multiple definitions +func (s *UserAttributeService) ReorderDefinitions(ctx context.Context, orders map[int64]int) error { + return s.defRepo.UpdateDisplayOrders(ctx, orders) +} + +// GetUserAttributes retrieves all attribute values for a user +func (s *UserAttributeService) GetUserAttributes(ctx context.Context, userID int64) ([]UserAttributeValue, error) { + return s.valueRepo.GetByUserID(ctx, userID) +} + +// GetBatchUserAttributes retrieves attribute values for multiple users +// Returns a map of userID -> map of attributeID -> value +func (s *UserAttributeService) GetBatchUserAttributes(ctx context.Context, userIDs []int64) (map[int64]map[int64]string, error) { + values, err := s.valueRepo.GetByUserIDs(ctx, userIDs) + if err != nil { + return nil, err + } + + result := make(map[int64]map[int64]string) + for _, v := range values { + if result[v.UserID] == nil { + result[v.UserID] = make(map[int64]string) + } + result[v.UserID][v.AttributeID] = v.Value + } + + return result, nil +} + +// UpdateUserAttributes batch updates attribute values for a user +func (s *UserAttributeService) UpdateUserAttributes(ctx context.Context, userID int64, inputs []UpdateUserAttributeInput) error { + // Validate all values before updating + defs, err := s.defRepo.List(ctx, true) + if err != nil { + return fmt.Errorf("list definitions: %w", err) + } + + defMap := make(map[int64]*UserAttributeDefinition, len(defs)) + for i := range defs { + defMap[defs[i].ID] = &defs[i] + } + + for _, input := range inputs { + def, ok := defMap[input.AttributeID] + if !ok { + return ErrAttributeDefinitionNotFound + } + + if err := s.validateValue(def, input.Value); err != nil { + return err + } + } + + return s.valueRepo.UpsertBatch(ctx, userID, inputs) +} + +// validateValue validates a value against its definition +func (s *UserAttributeService) validateValue(def *UserAttributeDefinition, value string) error { + // Skip validation for empty non-required fields + if value == "" && !def.Required { + return nil + } + + // Required check + if def.Required && value == "" { + return validationError(fmt.Sprintf("%s is required", def.Name)) + } + + v := def.Validation + + // String length validation + if v.MinLength != nil && len(value) < *v.MinLength { + return validationError(fmt.Sprintf("%s must be at least %d characters", def.Name, *v.MinLength)) + } + if v.MaxLength != nil && len(value) > *v.MaxLength { + return validationError(fmt.Sprintf("%s must be at most %d characters", def.Name, *v.MaxLength)) + } + + // Number validation + if def.Type == AttributeTypeNumber && value != "" { + num, err := strconv.Atoi(value) + if err != nil { + return validationError(fmt.Sprintf("%s must be a number", def.Name)) + } + if v.Min != nil && num < *v.Min { + return validationError(fmt.Sprintf("%s must be at least %d", def.Name, *v.Min)) + } + if v.Max != nil && num > *v.Max { + return validationError(fmt.Sprintf("%s must be at most %d", def.Name, *v.Max)) + } + } + + // Pattern validation + if v.Pattern != nil && *v.Pattern != "" && value != "" { + re, err := regexp.Compile(*v.Pattern) + if err == nil && !re.MatchString(value) { + msg := def.Name + " format is invalid" + if v.Message != nil && *v.Message != "" { + msg = *v.Message + } + return validationError(msg) + } + } + + // Select validation + if def.Type == AttributeTypeSelect && value != "" { + found := false + for _, opt := range def.Options { + if opt.Value == value { + found = true + break + } + } + if !found { + return validationError(fmt.Sprintf("%s: invalid option", def.Name)) + } + } + + // Multi-select validation (stored as JSON array) + if def.Type == AttributeTypeMultiSelect && value != "" { + var values []string + if err := json.Unmarshal([]byte(value), &values); err != nil { + // Try comma-separated fallback + values = strings.Split(value, ",") + } + for _, val := range values { + val = strings.TrimSpace(val) + found := false + for _, opt := range def.Options { + if opt.Value == val { + found = true + break + } + } + if !found { + return validationError(fmt.Sprintf("%s: invalid option %s", def.Name, val)) + } + } + } + + return nil +} + +// validationError creates a validation error with a custom message +func validationError(msg string) error { + return infraerrors.BadRequest("ATTRIBUTE_VALIDATION_FAILED", msg) +} + +func isValidAttributeType(t UserAttributeType) bool { + switch t { + case AttributeTypeText, AttributeTypeTextarea, AttributeTypeNumber, + AttributeTypeEmail, AttributeTypeURL, AttributeTypeDate, + AttributeTypeSelect, AttributeTypeMultiSelect: + return true + } + return false +} diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 9920a3ef..7971f041 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -125,4 +125,5 @@ var ProviderSet = wire.NewSet( ProvideTimingWheelService, ProvideDeferredService, ProvideAntigravityQuotaRefresher, + NewUserAttributeService, ) diff --git a/backend/migrations/018_user_attributes.sql b/backend/migrations/018_user_attributes.sql new file mode 100644 index 00000000..d2dad80d --- /dev/null +++ b/backend/migrations/018_user_attributes.sql @@ -0,0 +1,48 @@ +-- Add user attribute definitions and values tables for custom user attributes. + +-- User Attribute Definitions table (with soft delete support) +CREATE TABLE IF NOT EXISTS user_attribute_definitions ( + id BIGSERIAL PRIMARY KEY, + key VARCHAR(100) NOT NULL, + name VARCHAR(255) NOT NULL, + description TEXT DEFAULT '', + type VARCHAR(20) NOT NULL, + options JSONB DEFAULT '[]'::jsonb, + required BOOLEAN NOT NULL DEFAULT FALSE, + validation JSONB DEFAULT '{}'::jsonb, + placeholder VARCHAR(255) DEFAULT '', + display_order INT NOT NULL DEFAULT 0, + enabled BOOLEAN NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ +); + +-- Partial unique index for key (only for non-deleted records) +-- Allows reusing keys after soft delete +CREATE UNIQUE INDEX IF NOT EXISTS idx_user_attribute_definitions_key_unique + ON user_attribute_definitions(key) WHERE deleted_at IS NULL; + +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_enabled + ON user_attribute_definitions(enabled); +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_display_order + ON user_attribute_definitions(display_order); +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_deleted_at + ON user_attribute_definitions(deleted_at); + +-- User Attribute Values table (hard delete only, no deleted_at) +CREATE TABLE IF NOT EXISTS user_attribute_values ( + id BIGSERIAL PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + attribute_id BIGINT NOT NULL REFERENCES user_attribute_definitions(id) ON DELETE CASCADE, + value TEXT DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + UNIQUE(user_id, attribute_id) +); + +CREATE INDEX IF NOT EXISTS idx_user_attribute_values_user_id + ON user_attribute_values(user_id); +CREATE INDEX IF NOT EXISTS idx_user_attribute_values_attribute_id + ON user_attribute_values(attribute_id); From f44cf642bc7f7165804d91ad08e0440ce1b6f94d Mon Sep 17 00:00:00 2001 From: Edric Li Date: Thu, 1 Jan 2026 18:59:06 +0800 Subject: [PATCH 41/51] feat(frontend): add user attributes management UI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add Vue components and API client for managing user custom attributes. - Add userAttributes API client with CRUD operations - Add UserAttributeForm component for displaying/editing attribute values - Add UserAttributesConfigModal for attribute definition management - Support all attribute types: text, textarea, number, email, url, date, select, multi_select 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- frontend/src/api/admin/index.ts | 7 +- frontend/src/api/admin/userAttributes.ts | 131 ++++++ .../src/components/user/UserAttributeForm.vue | 207 +++++++++ .../user/UserAttributesConfigModal.vue | 404 ++++++++++++++++++ 4 files changed, 747 insertions(+), 2 deletions(-) create mode 100644 frontend/src/api/admin/userAttributes.ts create mode 100644 frontend/src/components/user/UserAttributeForm.vue create mode 100644 frontend/src/components/user/UserAttributesConfigModal.vue diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index 7c98b74e..ea12f6d2 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -15,6 +15,7 @@ import subscriptionsAPI from './subscriptions' import usageAPI from './usage' import geminiAPI from './gemini' import antigravityAPI from './antigravity' +import userAttributesAPI from './userAttributes' /** * Unified admin API object for convenient access @@ -31,7 +32,8 @@ export const adminAPI = { subscriptions: subscriptionsAPI, usage: usageAPI, gemini: geminiAPI, - antigravity: antigravityAPI + antigravity: antigravityAPI, + userAttributes: userAttributesAPI } export { @@ -46,7 +48,8 @@ export { subscriptionsAPI, usageAPI, geminiAPI, - antigravityAPI + antigravityAPI, + userAttributesAPI } export default adminAPI diff --git a/frontend/src/api/admin/userAttributes.ts b/frontend/src/api/admin/userAttributes.ts new file mode 100644 index 00000000..304aa828 --- /dev/null +++ b/frontend/src/api/admin/userAttributes.ts @@ -0,0 +1,131 @@ +/** + * Admin User Attributes API endpoints + * Handles user custom attribute definitions and values + */ + +import { apiClient } from '../client' +import type { + UserAttributeDefinition, + UserAttributeValue, + CreateUserAttributeRequest, + UpdateUserAttributeRequest, + UserAttributeValuesMap +} from '@/types' + +/** + * Get all attribute definitions + */ +export async function listDefinitions(): Promise { + const { data } = await apiClient.get('/admin/user-attributes') + return data +} + +/** + * Get enabled attribute definitions only + */ +export async function listEnabledDefinitions(): Promise { + const { data } = await apiClient.get('/admin/user-attributes', { + params: { enabled: true } + }) + return data +} + +/** + * Create a new attribute definition + */ +export async function createDefinition( + request: CreateUserAttributeRequest +): Promise { + const { data } = await apiClient.post('/admin/user-attributes', request) + return data +} + +/** + * Update an attribute definition + */ +export async function updateDefinition( + id: number, + request: UpdateUserAttributeRequest +): Promise { + const { data } = await apiClient.put( + `/admin/user-attributes/${id}`, + request + ) + return data +} + +/** + * Delete an attribute definition + */ +export async function deleteDefinition(id: number): Promise<{ message: string }> { + const { data } = await apiClient.delete<{ message: string }>(`/admin/user-attributes/${id}`) + return data +} + +/** + * Reorder attribute definitions + */ +export async function reorderDefinitions(ids: number[]): Promise<{ message: string }> { + const { data } = await apiClient.put<{ message: string }>('/admin/user-attributes/reorder', { + ids + }) + return data +} + +/** + * Get user's attribute values + */ +export async function getUserAttributeValues(userId: number): Promise { + const { data } = await apiClient.get( + `/admin/users/${userId}/attributes` + ) + return data +} + +/** + * Update user's attribute values (batch) + */ +export async function updateUserAttributeValues( + userId: number, + values: UserAttributeValuesMap +): Promise<{ message: string }> { + const { data } = await apiClient.put<{ message: string }>( + `/admin/users/${userId}/attributes`, + { values } + ) + return data +} + +/** + * Batch response type + */ +export interface BatchUserAttributesResponse { + attributes: Record> +} + +/** + * Get attribute values for multiple users + */ +export async function getBatchUserAttributes( + userIds: number[] +): Promise { + const { data } = await apiClient.post( + '/admin/user-attributes/batch', + { user_ids: userIds } + ) + return data +} + +export const userAttributesAPI = { + listDefinitions, + listEnabledDefinitions, + createDefinition, + updateDefinition, + deleteDefinition, + reorderDefinitions, + getUserAttributeValues, + updateUserAttributeValues, + getBatchUserAttributes +} + +export default userAttributesAPI diff --git a/frontend/src/components/user/UserAttributeForm.vue b/frontend/src/components/user/UserAttributeForm.vue new file mode 100644 index 00000000..68807c5d --- /dev/null +++ b/frontend/src/components/user/UserAttributeForm.vue @@ -0,0 +1,207 @@ +