diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..06696994 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,18 @@ + +# OpenSpec Instructions + +These instructions are for AI assistants working in this project. + +Always open `@/openspec/AGENTS.md` when the request: +- Mentions planning or proposals (words like proposal, spec, change, plan) +- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work +- Sounds ambiguous and you need the authoritative spec before coding + +Use `@/openspec/AGENTS.md` to learn: +- How to create and apply change proposals +- Spec format and conventions +- Project structure and guidelines + +Keep this managed block so 'openspec update' can refresh the instructions. + + \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..32c0923a --- /dev/null +++ b/Makefile @@ -0,0 +1,12 @@ +.PHONY: build build-backend build-frontend + +# 一键编译前后端 +build: build-backend build-frontend + +# 编译后端(复用 backend/Makefile) +build-backend: + @$(MAKE) -C backend build + +# 编译前端(需要已安装依赖) +build-frontend: + @npm --prefix frontend run build diff --git a/README.md b/README.md index e25a6e8a..40bb06b6 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,8 @@ English | [中文](README_CN.md) Try Sub2API online: **https://v2.pincc.ai/** +Demo credentials (shared demo environment; **not** created automatically for self-hosted installs): + | Email | Password | |-------|----------| | admin@sub2api.com | admin123 | @@ -260,8 +262,10 @@ jwt: expire_hour: 24 default: - admin_email: "admin@example.com" - admin_password: "admin123" + user_concurrency: 5 + user_balance: 0 + api_key_prefix: "sk-" + rate_multiplier: 1.0 ``` ```bash @@ -281,6 +285,16 @@ cd frontend npm run dev ``` +#### Code Generation + +When editing `backend/ent/schema`, regenerate Ent + Wire: + +```bash +cd backend +go generate ./ent +go generate ./cmd/server +``` + --- ## Project Structure diff --git a/README_CN.md 
b/README_CN.md index a93fb9d8..80fa4010 100644 --- a/README_CN.md +++ b/README_CN.md @@ -20,6 +20,8 @@ 体验地址:**https://v2.pincc.ai/** +演示账号(共享演示环境;自建部署不会自动创建该账号): + | 邮箱 | 密码 | |------|------| | admin@sub2api.com | admin123 | @@ -260,8 +262,10 @@ jwt: expire_hour: 24 default: - admin_email: "admin@example.com" - admin_password: "admin123" + user_concurrency: 5 + user_balance: 0 + api_key_prefix: "sk-" + rate_multiplier: 1.0 ``` ```bash @@ -281,6 +285,16 @@ cd frontend npm run dev ``` +#### 代码生成 + +修改 `backend/ent/schema` 后,需要重新生成 Ent + Wire: + +```bash +cd backend +go generate ./ent +go generate ./cmd/server +``` + --- ## 项目结构 diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go index a81a572e..288daec9 100644 --- a/backend/cmd/server/main.go +++ b/backend/cmd/server/main.go @@ -15,6 +15,7 @@ import ( "syscall" "time" + _ "github.com/Wei-Shaw/sub2api/ent/runtime" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" "github.com/Wei-Shaw/sub2api/internal/server/middleware" diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 596c8516..2f491b21 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -9,6 +9,7 @@ import ( "net/http" "time" + "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" "github.com/Wei-Shaw/sub2api/internal/infrastructure" @@ -19,7 +20,6 @@ import ( "github.com/google/wire" "github.com/redis/go-redis/v9" - "gorm.io/gorm" ) type Application struct { @@ -62,7 +62,7 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { } func provideCleanup( - db *gorm.DB, + entClient *ent.Client, rdb *redis.Client, tokenRefresh *service.TokenRefreshService, pricing *service.PricingService, @@ -107,12 +107,8 @@ func provideCleanup( {"Redis", func() error { return rdb.Close() }}, - {"Database", func() error { - sqlDB, err := db.DB() - if err != nil { - return err - } - 
return sqlDB.Close() + {"Ent", func() error { + return entClient.Close() }}, } diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 9904aa0d..2b9a5cf6 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -8,6 +8,7 @@ package main import ( "context" + "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" "github.com/Wei-Shaw/sub2api/internal/handler/admin" @@ -17,7 +18,6 @@ import ( "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/redis/go-redis/v9" - "gorm.io/gorm" "log" "net/http" "time" @@ -25,6 +25,7 @@ import ( import ( _ "embed" + _ "github.com/Wei-Shaw/sub2api/ent/runtime" ) // Injectors from wire.go: @@ -34,15 +35,19 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { if err != nil { return nil, err } - db, err := infrastructure.ProvideDB(configConfig) + client, err := infrastructure.ProvideEnt(configConfig) if err != nil { return nil, err } - userRepository := repository.NewUserRepository(db) - settingRepository := repository.NewSettingRepository(db) + sqlDB, err := infrastructure.ProvideSQLDB(client) + if err != nil { + return nil, err + } + userRepository := repository.NewUserRepository(client, sqlDB) + settingRepository := repository.NewSettingRepository(client) settingService := service.NewSettingService(settingRepository, configConfig) - client := infrastructure.ProvideRedis(configConfig) - emailCache := repository.NewEmailCache(client) + redisClient := infrastructure.ProvideRedis(configConfig) + emailCache := repository.NewEmailCache(redisClient) emailService := service.NewEmailService(settingRepository, emailCache) turnstileVerifier := repository.NewTurnstileVerifier() turnstileService := service.NewTurnstileService(settingService, turnstileVerifier) @@ -51,27 +56,27 @@ func initializeApplication(buildInfo 
handler.BuildInfo) (*Application, error) { userService := service.NewUserService(userRepository) authHandler := handler.NewAuthHandler(authService, userService) userHandler := handler.NewUserHandler(userService) - apiKeyRepository := repository.NewApiKeyRepository(db) - groupRepository := repository.NewGroupRepository(db) - userSubscriptionRepository := repository.NewUserSubscriptionRepository(db) - apiKeyCache := repository.NewApiKeyCache(client) + apiKeyRepository := repository.NewApiKeyRepository(client) + groupRepository := repository.NewGroupRepository(client, sqlDB) + userSubscriptionRepository := repository.NewUserSubscriptionRepository(client) + apiKeyCache := repository.NewApiKeyCache(redisClient) apiKeyService := service.NewApiKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) - usageLogRepository := repository.NewUsageLogRepository(db) + usageLogRepository := repository.NewUsageLogRepository(client, sqlDB) usageService := service.NewUsageService(usageLogRepository, userRepository) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) - redeemCodeRepository := repository.NewRedeemCodeRepository(db) - billingCache := repository.NewBillingCache(client) + redeemCodeRepository := repository.NewRedeemCodeRepository(client) + billingCache := repository.NewBillingCache(redisClient) billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository) subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService) - redeemCache := repository.NewRedeemCache(client) + redeemCache := repository.NewRedeemCache(redisClient) redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService) redeemHandler := handler.NewRedeemHandler(redeemService) subscriptionHandler := 
handler.NewSubscriptionHandler(subscriptionService) dashboardService := service.NewDashboardService(usageLogRepository) dashboardHandler := admin.NewDashboardHandler(dashboardService) - accountRepository := repository.NewAccountRepository(db) - proxyRepository := repository.NewProxyRepository(db) + accountRepository := repository.NewAccountRepository(client, sqlDB) + proxyRepository := repository.NewProxyRepository(client, sqlDB) proxyExitInfoProber := repository.NewProxyExitInfoProber() adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber) adminUserHandler := admin.NewUserHandler(adminService) @@ -86,11 +91,11 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { rateLimitService := service.NewRateLimitService(accountRepository, configConfig) claudeUsageFetcher := repository.NewClaudeUsageFetcher() accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher) - geminiTokenCache := repository.NewGeminiTokenCache(client) + geminiTokenCache := repository.NewGeminiTokenCache(redisClient) geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService) httpUpstream := repository.NewHTTPUpstream(configConfig) accountTestService := service.NewAccountTestService(accountRepository, oAuthService, openAIOAuthService, geminiTokenProvider, httpUpstream) - concurrencyCache := repository.NewConcurrencyCache(client) + concurrencyCache := repository.NewConcurrencyCache(redisClient) concurrencyService := service.NewConcurrencyService(concurrencyCache) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService) accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, rateLimitService, 
accountUsageService, accountTestService, concurrencyService, crsSyncService) @@ -100,7 +105,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { proxyHandler := admin.NewProxyHandler(adminService) adminRedeemHandler := admin.NewRedeemHandler(adminService) settingHandler := admin.NewSettingHandler(settingService, emailService) - updateCache := repository.NewUpdateCache(client) + updateCache := repository.NewUpdateCache(redisClient) gitHubReleaseClient := repository.NewGitHubReleaseClient() serviceBuildInfo := provideServiceBuildInfo(buildInfo) updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) @@ -108,14 +113,14 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) - gatewayCache := repository.NewGatewayCache(client) + gatewayCache := repository.NewGatewayCache(redisClient) pricingRemoteClient := repository.NewPricingRemoteClient() pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) if err != nil { return nil, err } billingService := service.NewBillingService(configConfig, pricingService) - identityCache := repository.NewIdentityCache(client) + identityCache := repository.NewIdentityCache(redisClient) identityService := service.NewIdentityService(identityCache) timingWheelService := service.ProvideTimingWheelService() deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) @@ -132,7 +137,7 @@ func initializeApplication(buildInfo handler.BuildInfo) 
(*Application, error) { engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService) httpServer := server.ProvideHTTPServer(configConfig, engine) tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) - v := provideCleanup(db, client, tokenRefreshService, pricingService, emailQueueService, oAuthService, openAIOAuthService, geminiOAuthService) + v := provideCleanup(client, redisClient, tokenRefreshService, pricingService, emailQueueService, oAuthService, openAIOAuthService, geminiOAuthService) application := &Application{ Server: httpServer, Cleanup: v, @@ -155,7 +160,7 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { } func provideCleanup( - db *gorm.DB, + entClient *ent.Client, rdb *redis.Client, tokenRefresh *service.TokenRefreshService, pricing *service.PricingService, @@ -199,12 +204,8 @@ func provideCleanup( {"Redis", func() error { return rdb.Close() }}, - {"Database", func() error { - sqlDB, err := db.DB() - if err != nil { - return err - } - return sqlDB.Close() + {"Ent", func() error { + return entClient.Close() }}, } diff --git a/backend/ent/account.go b/backend/ent/account.go new file mode 100644 index 00000000..59f55edb --- /dev/null +++ b/backend/ent/account.go @@ -0,0 +1,407 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" +) + +// Account is the model entity for the Account schema. +type Account struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. 
+ UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Platform holds the value of the "platform" field. + Platform string `json:"platform,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Credentials holds the value of the "credentials" field. + Credentials map[string]interface{} `json:"credentials,omitempty"` + // Extra holds the value of the "extra" field. + Extra map[string]interface{} `json:"extra,omitempty"` + // ProxyID holds the value of the "proxy_id" field. + ProxyID *int64 `json:"proxy_id,omitempty"` + // Concurrency holds the value of the "concurrency" field. + Concurrency int `json:"concurrency,omitempty"` + // Priority holds the value of the "priority" field. + Priority int `json:"priority,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // ErrorMessage holds the value of the "error_message" field. + ErrorMessage *string `json:"error_message,omitempty"` + // LastUsedAt holds the value of the "last_used_at" field. + LastUsedAt *time.Time `json:"last_used_at,omitempty"` + // Schedulable holds the value of the "schedulable" field. + Schedulable bool `json:"schedulable,omitempty"` + // RateLimitedAt holds the value of the "rate_limited_at" field. + RateLimitedAt *time.Time `json:"rate_limited_at,omitempty"` + // RateLimitResetAt holds the value of the "rate_limit_reset_at" field. + RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"` + // OverloadUntil holds the value of the "overload_until" field. + OverloadUntil *time.Time `json:"overload_until,omitempty"` + // SessionWindowStart holds the value of the "session_window_start" field. 
+ SessionWindowStart *time.Time `json:"session_window_start,omitempty"` + // SessionWindowEnd holds the value of the "session_window_end" field. + SessionWindowEnd *time.Time `json:"session_window_end,omitempty"` + // SessionWindowStatus holds the value of the "session_window_status" field. + SessionWindowStatus *string `json:"session_window_status,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AccountQuery when eager-loading is set. + Edges AccountEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AccountEdges holds the relations/edges for other nodes in the graph. +type AccountEdges struct { + // Groups holds the value of the groups edge. + Groups []*Group `json:"groups,omitempty"` + // AccountGroups holds the value of the account_groups edge. + AccountGroups []*AccountGroup `json:"account_groups,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// GroupsOrErr returns the Groups value or an error if the edge +// was not loaded in eager-loading. +func (e AccountEdges) GroupsOrErr() ([]*Group, error) { + if e.loadedTypes[0] { + return e.Groups, nil + } + return nil, &NotLoadedError{edge: "groups"} +} + +// AccountGroupsOrErr returns the AccountGroups value or an error if the edge +// was not loaded in eager-loading. +func (e AccountEdges) AccountGroupsOrErr() ([]*AccountGroup, error) { + if e.loadedTypes[1] { + return e.AccountGroups, nil + } + return nil, &NotLoadedError{edge: "account_groups"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Account) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case account.FieldCredentials, account.FieldExtra: + values[i] = new([]byte) + case account.FieldSchedulable: + values[i] = new(sql.NullBool) + case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority: + values[i] = new(sql.NullInt64) + case account.FieldName, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus: + values[i] = new(sql.NullString) + case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Account fields. 
+func (_m *Account) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case account.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case account.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case account.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case account.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case account.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case account.FieldPlatform: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field platform", values[i]) + } else if value.Valid { + _m.Platform = value.String + } + case account.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + _m.Type = value.String + } + case account.FieldCredentials: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field credentials", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Credentials); err != nil { + return 
fmt.Errorf("unmarshal field credentials: %w", err) + } + } + case account.FieldExtra: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field extra", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Extra); err != nil { + return fmt.Errorf("unmarshal field extra: %w", err) + } + } + case account.FieldProxyID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field proxy_id", values[i]) + } else if value.Valid { + _m.ProxyID = new(int64) + *_m.ProxyID = value.Int64 + } + case account.FieldConcurrency: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field concurrency", values[i]) + } else if value.Valid { + _m.Concurrency = int(value.Int64) + } + case account.FieldPriority: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field priority", values[i]) + } else if value.Valid { + _m.Priority = int(value.Int64) + } + case account.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case account.FieldErrorMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_message", values[i]) + } else if value.Valid { + _m.ErrorMessage = new(string) + *_m.ErrorMessage = value.String + } + case account.FieldLastUsedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_used_at", values[i]) + } else if value.Valid { + _m.LastUsedAt = new(time.Time) + *_m.LastUsedAt = value.Time + } + case account.FieldSchedulable: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field schedulable", values[i]) + } else if value.Valid { + _m.Schedulable = value.Bool + } + case 
account.FieldRateLimitedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field rate_limited_at", values[i]) + } else if value.Valid { + _m.RateLimitedAt = new(time.Time) + *_m.RateLimitedAt = value.Time + } + case account.FieldRateLimitResetAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field rate_limit_reset_at", values[i]) + } else if value.Valid { + _m.RateLimitResetAt = new(time.Time) + *_m.RateLimitResetAt = value.Time + } + case account.FieldOverloadUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field overload_until", values[i]) + } else if value.Valid { + _m.OverloadUntil = new(time.Time) + *_m.OverloadUntil = value.Time + } + case account.FieldSessionWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field session_window_start", values[i]) + } else if value.Valid { + _m.SessionWindowStart = new(time.Time) + *_m.SessionWindowStart = value.Time + } + case account.FieldSessionWindowEnd: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field session_window_end", values[i]) + } else if value.Valid { + _m.SessionWindowEnd = new(time.Time) + *_m.SessionWindowEnd = value.Time + } + case account.FieldSessionWindowStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field session_window_status", values[i]) + } else if value.Valid { + _m.SessionWindowStatus = new(string) + *_m.SessionWindowStatus = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Account. +// This includes values selected through modifiers, order, etc. 
+func (_m *Account) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryGroups queries the "groups" edge of the Account entity. +func (_m *Account) QueryGroups() *GroupQuery { + return NewAccountClient(_m.config).QueryGroups(_m) +} + +// QueryAccountGroups queries the "account_groups" edge of the Account entity. +func (_m *Account) QueryAccountGroups() *AccountGroupQuery { + return NewAccountClient(_m.config).QueryAccountGroups(_m) +} + +// Update returns a builder for updating this Account. +// Note that you need to call Account.Unwrap() before calling this method if this Account +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Account) Update() *AccountUpdateOne { + return NewAccountClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Account entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Account) Unwrap() *Account { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Account is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *Account) String() string { + var builder strings.Builder + builder.WriteString("Account(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("platform=") + builder.WriteString(_m.Platform) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(_m.Type) + builder.WriteString(", ") + builder.WriteString("credentials=") + builder.WriteString(fmt.Sprintf("%v", _m.Credentials)) + builder.WriteString(", ") + builder.WriteString("extra=") + builder.WriteString(fmt.Sprintf("%v", _m.Extra)) + builder.WriteString(", ") + if v := _m.ProxyID; v != nil { + builder.WriteString("proxy_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("concurrency=") + builder.WriteString(fmt.Sprintf("%v", _m.Concurrency)) + builder.WriteString(", ") + builder.WriteString("priority=") + builder.WriteString(fmt.Sprintf("%v", _m.Priority)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.ErrorMessage; v != nil { + builder.WriteString("error_message=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.LastUsedAt; v != nil { + builder.WriteString("last_used_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("schedulable=") + builder.WriteString(fmt.Sprintf("%v", _m.Schedulable)) + builder.WriteString(", ") + if v := _m.RateLimitedAt; v != nil { + 
builder.WriteString("rate_limited_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.RateLimitResetAt; v != nil { + builder.WriteString("rate_limit_reset_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.OverloadUntil; v != nil { + builder.WriteString("overload_until=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.SessionWindowStart; v != nil { + builder.WriteString("session_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.SessionWindowEnd; v != nil { + builder.WriteString("session_window_end=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.SessionWindowStatus; v != nil { + builder.WriteString("session_window_status=") + builder.WriteString(*v) + } + builder.WriteByte(')') + return builder.String() +} + +// Accounts is a parsable slice of Account. +type Accounts []*Account diff --git a/backend/ent/account/account.go b/backend/ent/account/account.go new file mode 100644 index 00000000..65a130fd --- /dev/null +++ b/backend/ent/account/account.go @@ -0,0 +1,303 @@ +// Code generated by ent, DO NOT EDIT. + +package account + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the account type in the database. + Label = "account" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldName holds the string denoting the name field in the database. 
+ FieldName = "name" + // FieldPlatform holds the string denoting the platform field in the database. + FieldPlatform = "platform" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldCredentials holds the string denoting the credentials field in the database. + FieldCredentials = "credentials" + // FieldExtra holds the string denoting the extra field in the database. + FieldExtra = "extra" + // FieldProxyID holds the string denoting the proxy_id field in the database. + FieldProxyID = "proxy_id" + // FieldConcurrency holds the string denoting the concurrency field in the database. + FieldConcurrency = "concurrency" + // FieldPriority holds the string denoting the priority field in the database. + FieldPriority = "priority" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldErrorMessage holds the string denoting the error_message field in the database. + FieldErrorMessage = "error_message" + // FieldLastUsedAt holds the string denoting the last_used_at field in the database. + FieldLastUsedAt = "last_used_at" + // FieldSchedulable holds the string denoting the schedulable field in the database. + FieldSchedulable = "schedulable" + // FieldRateLimitedAt holds the string denoting the rate_limited_at field in the database. + FieldRateLimitedAt = "rate_limited_at" + // FieldRateLimitResetAt holds the string denoting the rate_limit_reset_at field in the database. + FieldRateLimitResetAt = "rate_limit_reset_at" + // FieldOverloadUntil holds the string denoting the overload_until field in the database. + FieldOverloadUntil = "overload_until" + // FieldSessionWindowStart holds the string denoting the session_window_start field in the database. + FieldSessionWindowStart = "session_window_start" + // FieldSessionWindowEnd holds the string denoting the session_window_end field in the database. 
+ FieldSessionWindowEnd = "session_window_end" + // FieldSessionWindowStatus holds the string denoting the session_window_status field in the database. + FieldSessionWindowStatus = "session_window_status" + // EdgeGroups holds the string denoting the groups edge name in mutations. + EdgeGroups = "groups" + // EdgeAccountGroups holds the string denoting the account_groups edge name in mutations. + EdgeAccountGroups = "account_groups" + // Table holds the table name of the account in the database. + Table = "accounts" + // GroupsTable is the table that holds the groups relation/edge. The primary key declared below. + GroupsTable = "account_groups" + // GroupsInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupsInverseTable = "groups" + // AccountGroupsTable is the table that holds the account_groups relation/edge. + AccountGroupsTable = "account_groups" + // AccountGroupsInverseTable is the table name for the AccountGroup entity. + // It exists in this package in order to avoid circular dependency with the "accountgroup" package. + AccountGroupsInverseTable = "account_groups" + // AccountGroupsColumn is the table column denoting the account_groups relation/edge. + AccountGroupsColumn = "account_id" +) + +// Columns holds all SQL columns for account fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldName, + FieldPlatform, + FieldType, + FieldCredentials, + FieldExtra, + FieldProxyID, + FieldConcurrency, + FieldPriority, + FieldStatus, + FieldErrorMessage, + FieldLastUsedAt, + FieldSchedulable, + FieldRateLimitedAt, + FieldRateLimitResetAt, + FieldOverloadUntil, + FieldSessionWindowStart, + FieldSessionWindowEnd, + FieldSessionWindowStatus, +} + +var ( + // GroupsPrimaryKey and GroupsColumn2 are the table columns denoting the + // primary key for the groups relation (M2M). 
+ GroupsPrimaryKey = []string{"account_id", "group_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + PlatformValidator func(string) error + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // DefaultCredentials holds the default value on creation for the "credentials" field. + DefaultCredentials func() map[string]interface{} + // DefaultExtra holds the default value on creation for the "extra" field. + DefaultExtra func() map[string]interface{} + // DefaultConcurrency holds the default value on creation for the "concurrency" field. + DefaultConcurrency int + // DefaultPriority holds the default value on creation for the "priority" field. + DefaultPriority int + // DefaultStatus holds the default value on creation for the "status" field. 
+ DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultSchedulable holds the default value on creation for the "schedulable" field. + DefaultSchedulable bool + // SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save. + SessionWindowStatusValidator func(string) error +) + +// OrderOption defines the ordering options for the Account queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByPlatform orders the results by the platform field. +func ByPlatform(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlatform, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByProxyID orders the results by the proxy_id field. 
+func ByProxyID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProxyID, opts...).ToFunc() +} + +// ByConcurrency orders the results by the concurrency field. +func ByConcurrency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConcurrency, opts...).ToFunc() +} + +// ByPriority orders the results by the priority field. +func ByPriority(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPriority, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByLastUsedAt orders the results by the last_used_at field. +func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc() +} + +// BySchedulable orders the results by the schedulable field. +func BySchedulable(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSchedulable, opts...).ToFunc() +} + +// ByRateLimitedAt orders the results by the rate_limited_at field. +func ByRateLimitedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateLimitedAt, opts...).ToFunc() +} + +// ByRateLimitResetAt orders the results by the rate_limit_reset_at field. +func ByRateLimitResetAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateLimitResetAt, opts...).ToFunc() +} + +// ByOverloadUntil orders the results by the overload_until field. +func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc() +} + +// BySessionWindowStart orders the results by the session_window_start field. 
+func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc() +} + +// BySessionWindowEnd orders the results by the session_window_end field. +func BySessionWindowEnd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowEnd, opts...).ToFunc() +} + +// BySessionWindowStatus orders the results by the session_window_status field. +func BySessionWindowStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowStatus, opts...).ToFunc() +} + +// ByGroupsCount orders the results by groups count. +func ByGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newGroupsStep(), opts...) + } +} + +// ByGroups orders the results by groups terms. +func ByGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountGroupsCount orders the results by account_groups count. +func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...) + } +} + +// ByAccountGroups orders the results by account_groups terms. +func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...), + ) +} +func newAccountGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) +} diff --git a/backend/ent/account/where.go b/backend/ent/account/where.go new file mode 100644 index 00000000..f54f538f --- /dev/null +++ b/backend/ent/account/where.go @@ -0,0 +1,1192 @@ +// Code generated by ent, DO NOT EDIT. + +package account + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Account { + return predicate.Account(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int64) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Account { + return predicate.Account(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldName, v)) +} + +// Platform applies equality check predicate on the "platform" field. It's identical to PlatformEQ. +func Platform(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPlatform, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldType, v)) +} + +// ProxyID applies equality check predicate on the "proxy_id" field. It's identical to ProxyIDEQ. +func ProxyID(v int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldProxyID, v)) +} + +// Concurrency applies equality check predicate on the "concurrency" field. It's identical to ConcurrencyEQ. 
+func Concurrency(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldConcurrency, v)) +} + +// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ. +func Priority(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPriority, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldStatus, v)) +} + +// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. +func ErrorMessage(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldErrorMessage, v)) +} + +// LastUsedAt applies equality check predicate on the "last_used_at" field. It's identical to LastUsedAtEQ. +func LastUsedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldLastUsedAt, v)) +} + +// Schedulable applies equality check predicate on the "schedulable" field. It's identical to SchedulableEQ. +func Schedulable(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSchedulable, v)) +} + +// RateLimitedAt applies equality check predicate on the "rate_limited_at" field. It's identical to RateLimitedAtEQ. +func RateLimitedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitedAt, v)) +} + +// RateLimitResetAt applies equality check predicate on the "rate_limit_reset_at" field. It's identical to RateLimitResetAtEQ. +func RateLimitResetAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitResetAt, v)) +} + +// OverloadUntil applies equality check predicate on the "overload_until" field. It's identical to OverloadUntilEQ. 
+func OverloadUntil(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v)) +} + +// SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ. +func SessionWindowStart(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowEnd applies equality check predicate on the "session_window_end" field. It's identical to SessionWindowEndEQ. +func SessionWindowEnd(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowStatus applies equality check predicate on the "session_window_status" field. It's identical to SessionWindowStatusEQ. +func SessionWindowStatus(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
+func UpdatedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. 
+func DeletedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldDeletedAt)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. 
+func NameHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldName, v)) +} + +// PlatformEQ applies the EQ predicate on the "platform" field. +func PlatformEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPlatform, v)) +} + +// PlatformNEQ applies the NEQ predicate on the "platform" field. +func PlatformNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldPlatform, v)) +} + +// PlatformIn applies the In predicate on the "platform" field. +func PlatformIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldPlatform, vs...)) +} + +// PlatformNotIn applies the NotIn predicate on the "platform" field. +func PlatformNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldPlatform, vs...)) +} + +// PlatformGT applies the GT predicate on the "platform" field. +func PlatformGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldPlatform, v)) +} + +// PlatformGTE applies the GTE predicate on the "platform" field. +func PlatformGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldPlatform, v)) +} + +// PlatformLT applies the LT predicate on the "platform" field. +func PlatformLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldPlatform, v)) +} + +// PlatformLTE applies the LTE predicate on the "platform" field. 
+func PlatformLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldPlatform, v)) +} + +// PlatformContains applies the Contains predicate on the "platform" field. +func PlatformContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldPlatform, v)) +} + +// PlatformHasPrefix applies the HasPrefix predicate on the "platform" field. +func PlatformHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldPlatform, v)) +} + +// PlatformHasSuffix applies the HasSuffix predicate on the "platform" field. +func PlatformHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldPlatform, v)) +} + +// PlatformEqualFold applies the EqualFold predicate on the "platform" field. +func PlatformEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldPlatform, v)) +} + +// PlatformContainsFold applies the ContainsFold predicate on the "platform" field. +func PlatformContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldPlatform, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. 
+func TypeGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldType, v)) +} + +// ProxyIDEQ applies the EQ predicate on the "proxy_id" field. +func ProxyIDEQ(v int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldProxyID, v)) +} + +// ProxyIDNEQ applies the NEQ predicate on the "proxy_id" field. +func ProxyIDNEQ(v int64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldProxyID, v)) +} + +// ProxyIDIn applies the In predicate on the "proxy_id" field. 
+func ProxyIDIn(vs ...int64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldProxyID, vs...)) +} + +// ProxyIDNotIn applies the NotIn predicate on the "proxy_id" field. +func ProxyIDNotIn(vs ...int64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldProxyID, vs...)) +} + +// ProxyIDGT applies the GT predicate on the "proxy_id" field. +func ProxyIDGT(v int64) predicate.Account { + return predicate.Account(sql.FieldGT(FieldProxyID, v)) +} + +// ProxyIDGTE applies the GTE predicate on the "proxy_id" field. +func ProxyIDGTE(v int64) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldProxyID, v)) +} + +// ProxyIDLT applies the LT predicate on the "proxy_id" field. +func ProxyIDLT(v int64) predicate.Account { + return predicate.Account(sql.FieldLT(FieldProxyID, v)) +} + +// ProxyIDLTE applies the LTE predicate on the "proxy_id" field. +func ProxyIDLTE(v int64) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldProxyID, v)) +} + +// ProxyIDIsNil applies the IsNil predicate on the "proxy_id" field. +func ProxyIDIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldProxyID)) +} + +// ProxyIDNotNil applies the NotNil predicate on the "proxy_id" field. +func ProxyIDNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldProxyID)) +} + +// ConcurrencyEQ applies the EQ predicate on the "concurrency" field. +func ConcurrencyEQ(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldConcurrency, v)) +} + +// ConcurrencyNEQ applies the NEQ predicate on the "concurrency" field. +func ConcurrencyNEQ(v int) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldConcurrency, v)) +} + +// ConcurrencyIn applies the In predicate on the "concurrency" field. +func ConcurrencyIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldIn(FieldConcurrency, vs...)) +} + +// ConcurrencyNotIn applies the NotIn predicate on the "concurrency" field. 
+func ConcurrencyNotIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldConcurrency, vs...)) +} + +// ConcurrencyGT applies the GT predicate on the "concurrency" field. +func ConcurrencyGT(v int) predicate.Account { + return predicate.Account(sql.FieldGT(FieldConcurrency, v)) +} + +// ConcurrencyGTE applies the GTE predicate on the "concurrency" field. +func ConcurrencyGTE(v int) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldConcurrency, v)) +} + +// ConcurrencyLT applies the LT predicate on the "concurrency" field. +func ConcurrencyLT(v int) predicate.Account { + return predicate.Account(sql.FieldLT(FieldConcurrency, v)) +} + +// ConcurrencyLTE applies the LTE predicate on the "concurrency" field. +func ConcurrencyLTE(v int) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldConcurrency, v)) +} + +// PriorityEQ applies the EQ predicate on the "priority" field. +func PriorityEQ(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPriority, v)) +} + +// PriorityNEQ applies the NEQ predicate on the "priority" field. +func PriorityNEQ(v int) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldPriority, v)) +} + +// PriorityIn applies the In predicate on the "priority" field. +func PriorityIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldIn(FieldPriority, vs...)) +} + +// PriorityNotIn applies the NotIn predicate on the "priority" field. +func PriorityNotIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldPriority, vs...)) +} + +// PriorityGT applies the GT predicate on the "priority" field. +func PriorityGT(v int) predicate.Account { + return predicate.Account(sql.FieldGT(FieldPriority, v)) +} + +// PriorityGTE applies the GTE predicate on the "priority" field. 
+func PriorityGTE(v int) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldPriority, v)) +} + +// PriorityLT applies the LT predicate on the "priority" field. +func PriorityLT(v int) predicate.Account { + return predicate.Account(sql.FieldLT(FieldPriority, v)) +} + +// PriorityLTE applies the LTE predicate on the "priority" field. +func PriorityLTE(v int) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldPriority, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. 
+func StatusContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldStatus, v)) +} + +// ErrorMessageEQ applies the EQ predicate on the "error_message" field. +func ErrorMessageEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. +func ErrorMessageNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldErrorMessage, v)) +} + +// ErrorMessageIn applies the In predicate on the "error_message" field. +func ErrorMessageIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. +func ErrorMessageNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageGT applies the GT predicate on the "error_message" field. +func ErrorMessageGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldErrorMessage, v)) +} + +// ErrorMessageGTE applies the GTE predicate on the "error_message" field. 
+func ErrorMessageGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldErrorMessage, v)) +} + +// ErrorMessageLT applies the LT predicate on the "error_message" field. +func ErrorMessageLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldErrorMessage, v)) +} + +// ErrorMessageLTE applies the LTE predicate on the "error_message" field. +func ErrorMessageLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldErrorMessage, v)) +} + +// ErrorMessageContains applies the Contains predicate on the "error_message" field. +func ErrorMessageContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldErrorMessage, v)) +} + +// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. +func ErrorMessageHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldErrorMessage, v)) +} + +// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. +func ErrorMessageHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldErrorMessage, v)) +} + +// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. +func ErrorMessageIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldErrorMessage)) +} + +// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. +func ErrorMessageNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldErrorMessage)) +} + +// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. +func ErrorMessageEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldErrorMessage, v)) +} + +// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. 
+func ErrorMessageContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldErrorMessage, v)) +} + +// LastUsedAtEQ applies the EQ predicate on the "last_used_at" field. +func LastUsedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldLastUsedAt, v)) +} + +// LastUsedAtNEQ applies the NEQ predicate on the "last_used_at" field. +func LastUsedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldLastUsedAt, v)) +} + +// LastUsedAtIn applies the In predicate on the "last_used_at" field. +func LastUsedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldLastUsedAt, vs...)) +} + +// LastUsedAtNotIn applies the NotIn predicate on the "last_used_at" field. +func LastUsedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldLastUsedAt, vs...)) +} + +// LastUsedAtGT applies the GT predicate on the "last_used_at" field. +func LastUsedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldLastUsedAt, v)) +} + +// LastUsedAtGTE applies the GTE predicate on the "last_used_at" field. +func LastUsedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldLastUsedAt, v)) +} + +// LastUsedAtLT applies the LT predicate on the "last_used_at" field. +func LastUsedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldLastUsedAt, v)) +} + +// LastUsedAtLTE applies the LTE predicate on the "last_used_at" field. +func LastUsedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldLastUsedAt, v)) +} + +// LastUsedAtIsNil applies the IsNil predicate on the "last_used_at" field. +func LastUsedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldLastUsedAt)) +} + +// LastUsedAtNotNil applies the NotNil predicate on the "last_used_at" field. 
+func LastUsedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldLastUsedAt)) +} + +// SchedulableEQ applies the EQ predicate on the "schedulable" field. +func SchedulableEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSchedulable, v)) +} + +// SchedulableNEQ applies the NEQ predicate on the "schedulable" field. +func SchedulableNEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSchedulable, v)) +} + +// RateLimitedAtEQ applies the EQ predicate on the "rate_limited_at" field. +func RateLimitedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitedAt, v)) +} + +// RateLimitedAtNEQ applies the NEQ predicate on the "rate_limited_at" field. +func RateLimitedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateLimitedAt, v)) +} + +// RateLimitedAtIn applies the In predicate on the "rate_limited_at" field. +func RateLimitedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateLimitedAt, vs...)) +} + +// RateLimitedAtNotIn applies the NotIn predicate on the "rate_limited_at" field. +func RateLimitedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateLimitedAt, vs...)) +} + +// RateLimitedAtGT applies the GT predicate on the "rate_limited_at" field. +func RateLimitedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateLimitedAt, v)) +} + +// RateLimitedAtGTE applies the GTE predicate on the "rate_limited_at" field. +func RateLimitedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateLimitedAt, v)) +} + +// RateLimitedAtLT applies the LT predicate on the "rate_limited_at" field. 
+func RateLimitedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateLimitedAt, v)) +} + +// RateLimitedAtLTE applies the LTE predicate on the "rate_limited_at" field. +func RateLimitedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateLimitedAt, v)) +} + +// RateLimitedAtIsNil applies the IsNil predicate on the "rate_limited_at" field. +func RateLimitedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldRateLimitedAt)) +} + +// RateLimitedAtNotNil applies the NotNil predicate on the "rate_limited_at" field. +func RateLimitedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldRateLimitedAt)) +} + +// RateLimitResetAtEQ applies the EQ predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtNEQ applies the NEQ predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtIn applies the In predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateLimitResetAt, vs...)) +} + +// RateLimitResetAtNotIn applies the NotIn predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateLimitResetAt, vs...)) +} + +// RateLimitResetAtGT applies the GT predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtGTE applies the GTE predicate on the "rate_limit_reset_at" field. 
+func RateLimitResetAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtLT applies the LT predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtLTE applies the LTE predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtIsNil applies the IsNil predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldRateLimitResetAt)) +} + +// RateLimitResetAtNotNil applies the NotNil predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldRateLimitResetAt)) +} + +// OverloadUntilEQ applies the EQ predicate on the "overload_until" field. +func OverloadUntilEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v)) +} + +// OverloadUntilNEQ applies the NEQ predicate on the "overload_until" field. +func OverloadUntilNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldOverloadUntil, v)) +} + +// OverloadUntilIn applies the In predicate on the "overload_until" field. +func OverloadUntilIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldOverloadUntil, vs...)) +} + +// OverloadUntilNotIn applies the NotIn predicate on the "overload_until" field. +func OverloadUntilNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldOverloadUntil, vs...)) +} + +// OverloadUntilGT applies the GT predicate on the "overload_until" field. 
+func OverloadUntilGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldOverloadUntil, v)) +} + +// OverloadUntilGTE applies the GTE predicate on the "overload_until" field. +func OverloadUntilGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldOverloadUntil, v)) +} + +// OverloadUntilLT applies the LT predicate on the "overload_until" field. +func OverloadUntilLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldOverloadUntil, v)) +} + +// OverloadUntilLTE applies the LTE predicate on the "overload_until" field. +func OverloadUntilLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldOverloadUntil, v)) +} + +// OverloadUntilIsNil applies the IsNil predicate on the "overload_until" field. +func OverloadUntilIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldOverloadUntil)) +} + +// OverloadUntilNotNil applies the NotNil predicate on the "overload_until" field. +func OverloadUntilNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldOverloadUntil)) +} + +// SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field. +func SessionWindowStartEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowStartNEQ applies the NEQ predicate on the "session_window_start" field. +func SessionWindowStartNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowStartIn applies the In predicate on the "session_window_start" field. +func SessionWindowStartIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowStart, vs...)) +} + +// SessionWindowStartNotIn applies the NotIn predicate on the "session_window_start" field. 
+func SessionWindowStartNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowStart, vs...)) +} + +// SessionWindowStartGT applies the GT predicate on the "session_window_start" field. +func SessionWindowStartGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowStart, v)) +} + +// SessionWindowStartGTE applies the GTE predicate on the "session_window_start" field. +func SessionWindowStartGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowStart, v)) +} + +// SessionWindowStartLT applies the LT predicate on the "session_window_start" field. +func SessionWindowStartLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowStart, v)) +} + +// SessionWindowStartLTE applies the LTE predicate on the "session_window_start" field. +func SessionWindowStartLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowStart, v)) +} + +// SessionWindowStartIsNil applies the IsNil predicate on the "session_window_start" field. +func SessionWindowStartIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowStart)) +} + +// SessionWindowStartNotNil applies the NotNil predicate on the "session_window_start" field. +func SessionWindowStartNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowStart)) +} + +// SessionWindowEndEQ applies the EQ predicate on the "session_window_end" field. +func SessionWindowEndEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndNEQ applies the NEQ predicate on the "session_window_end" field. +func SessionWindowEndNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndIn applies the In predicate on the "session_window_end" field. 
+func SessionWindowEndIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowEnd, vs...)) +} + +// SessionWindowEndNotIn applies the NotIn predicate on the "session_window_end" field. +func SessionWindowEndNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowEnd, vs...)) +} + +// SessionWindowEndGT applies the GT predicate on the "session_window_end" field. +func SessionWindowEndGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndGTE applies the GTE predicate on the "session_window_end" field. +func SessionWindowEndGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndLT applies the LT predicate on the "session_window_end" field. +func SessionWindowEndLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndLTE applies the LTE predicate on the "session_window_end" field. +func SessionWindowEndLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndIsNil applies the IsNil predicate on the "session_window_end" field. +func SessionWindowEndIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowEnd)) +} + +// SessionWindowEndNotNil applies the NotNil predicate on the "session_window_end" field. +func SessionWindowEndNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowEnd)) +} + +// SessionWindowStatusEQ applies the EQ predicate on the "session_window_status" field. +func SessionWindowStatusEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusNEQ applies the NEQ predicate on the "session_window_status" field. 
+func SessionWindowStatusNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusIn applies the In predicate on the "session_window_status" field. +func SessionWindowStatusIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowStatus, vs...)) +} + +// SessionWindowStatusNotIn applies the NotIn predicate on the "session_window_status" field. +func SessionWindowStatusNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowStatus, vs...)) +} + +// SessionWindowStatusGT applies the GT predicate on the "session_window_status" field. +func SessionWindowStatusGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusGTE applies the GTE predicate on the "session_window_status" field. +func SessionWindowStatusGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusLT applies the LT predicate on the "session_window_status" field. +func SessionWindowStatusLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusLTE applies the LTE predicate on the "session_window_status" field. +func SessionWindowStatusLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusContains applies the Contains predicate on the "session_window_status" field. +func SessionWindowStatusContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusHasPrefix applies the HasPrefix predicate on the "session_window_status" field. 
+func SessionWindowStatusHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusHasSuffix applies the HasSuffix predicate on the "session_window_status" field. +func SessionWindowStatusHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusIsNil applies the IsNil predicate on the "session_window_status" field. +func SessionWindowStatusIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowStatus)) +} + +// SessionWindowStatusNotNil applies the NotNil predicate on the "session_window_status" field. +func SessionWindowStatusNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowStatus)) +} + +// SessionWindowStatusEqualFold applies the EqualFold predicate on the "session_window_status" field. +func SessionWindowStatusEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusContainsFold applies the ContainsFold predicate on the "session_window_status" field. +func SessionWindowStatusContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldSessionWindowStatus, v)) +} + +// HasGroups applies the HasEdge predicate on the "groups" edge. +func HasGroups() predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupsWith applies the HasEdge predicate on the "groups" edge with a given conditions (other predicates). 
+func HasGroupsWith(preds ...predicate.Group) predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := newGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccountGroups applies the HasEdge predicate on the "account_groups" edge. +func HasAccountGroups() predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountGroupsWith applies the HasEdge predicate on the "account_groups" edge with a given conditions (other predicates). +func HasAccountGroupsWith(preds ...predicate.AccountGroup) predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := newAccountGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Account) predicate.Account { + return predicate.Account(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Account) predicate.Account { + return predicate.Account(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Account) predicate.Account { + return predicate.Account(sql.NotPredicates(p)) +} diff --git a/backend/ent/account_create.go b/backend/ent/account_create.go new file mode 100644 index 00000000..6d813817 --- /dev/null +++ b/backend/ent/account_create.go @@ -0,0 +1,1959 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/group" +) + +// AccountCreate is the builder for creating a Account entity. +type AccountCreate struct { + config + mutation *AccountMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AccountCreate) SetCreatedAt(v time.Time) *AccountCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableCreatedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *AccountCreate) SetUpdatedAt(v time.Time) *AccountCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableUpdatedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *AccountCreate) SetDeletedAt(v time.Time) *AccountCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableDeletedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *AccountCreate) SetName(v string) *AccountCreate { + _c.mutation.SetName(v) + return _c +} + +// SetPlatform sets the "platform" field. +func (_c *AccountCreate) SetPlatform(v string) *AccountCreate { + _c.mutation.SetPlatform(v) + return _c +} + +// SetType sets the "type" field. 
+func (_c *AccountCreate) SetType(v string) *AccountCreate { + _c.mutation.SetType(v) + return _c +} + +// SetCredentials sets the "credentials" field. +func (_c *AccountCreate) SetCredentials(v map[string]interface{}) *AccountCreate { + _c.mutation.SetCredentials(v) + return _c +} + +// SetExtra sets the "extra" field. +func (_c *AccountCreate) SetExtra(v map[string]interface{}) *AccountCreate { + _c.mutation.SetExtra(v) + return _c +} + +// SetProxyID sets the "proxy_id" field. +func (_c *AccountCreate) SetProxyID(v int64) *AccountCreate { + _c.mutation.SetProxyID(v) + return _c +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. +func (_c *AccountCreate) SetNillableProxyID(v *int64) *AccountCreate { + if v != nil { + _c.SetProxyID(*v) + } + return _c +} + +// SetConcurrency sets the "concurrency" field. +func (_c *AccountCreate) SetConcurrency(v int) *AccountCreate { + _c.mutation.SetConcurrency(v) + return _c +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_c *AccountCreate) SetNillableConcurrency(v *int) *AccountCreate { + if v != nil { + _c.SetConcurrency(*v) + } + return _c +} + +// SetPriority sets the "priority" field. +func (_c *AccountCreate) SetPriority(v int) *AccountCreate { + _c.mutation.SetPriority(v) + return _c +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_c *AccountCreate) SetNillablePriority(v *int) *AccountCreate { + if v != nil { + _c.SetPriority(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *AccountCreate) SetStatus(v string) *AccountCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *AccountCreate) SetNillableStatus(v *string) *AccountCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetErrorMessage sets the "error_message" field. 
+func (_c *AccountCreate) SetErrorMessage(v string) *AccountCreate { + _c.mutation.SetErrorMessage(v) + return _c +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_c *AccountCreate) SetNillableErrorMessage(v *string) *AccountCreate { + if v != nil { + _c.SetErrorMessage(*v) + } + return _c +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_c *AccountCreate) SetLastUsedAt(v time.Time) *AccountCreate { + _c.mutation.SetLastUsedAt(v) + return _c +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableLastUsedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetLastUsedAt(*v) + } + return _c +} + +// SetSchedulable sets the "schedulable" field. +func (_c *AccountCreate) SetSchedulable(v bool) *AccountCreate { + _c.mutation.SetSchedulable(v) + return _c +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSchedulable(v *bool) *AccountCreate { + if v != nil { + _c.SetSchedulable(*v) + } + return _c +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_c *AccountCreate) SetRateLimitedAt(v time.Time) *AccountCreate { + _c.mutation.SetRateLimitedAt(v) + return _c +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableRateLimitedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetRateLimitedAt(*v) + } + return _c +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_c *AccountCreate) SetRateLimitResetAt(v time.Time) *AccountCreate { + _c.mutation.SetRateLimitResetAt(v) + return _c +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. 
+func (_c *AccountCreate) SetNillableRateLimitResetAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetRateLimitResetAt(*v) + } + return _c +} + +// SetOverloadUntil sets the "overload_until" field. +func (_c *AccountCreate) SetOverloadUntil(v time.Time) *AccountCreate { + _c.mutation.SetOverloadUntil(v) + return _c +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. +func (_c *AccountCreate) SetNillableOverloadUntil(v *time.Time) *AccountCreate { + if v != nil { + _c.SetOverloadUntil(*v) + } + return _c +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate { + _c.mutation.SetSessionWindowStart(v) + return _c +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSessionWindowStart(v *time.Time) *AccountCreate { + if v != nil { + _c.SetSessionWindowStart(*v) + } + return _c +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_c *AccountCreate) SetSessionWindowEnd(v time.Time) *AccountCreate { + _c.mutation.SetSessionWindowEnd(v) + return _c +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSessionWindowEnd(v *time.Time) *AccountCreate { + if v != nil { + _c.SetSessionWindowEnd(*v) + } + return _c +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_c *AccountCreate) SetSessionWindowStatus(v string) *AccountCreate { + _c.mutation.SetSessionWindowStatus(v) + return _c +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSessionWindowStatus(v *string) *AccountCreate { + if v != nil { + _c.SetSessionWindowStatus(*v) + } + return _c +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. 
+func (_c *AccountCreate) AddGroupIDs(ids ...int64) *AccountCreate { + _c.mutation.AddGroupIDs(ids...) + return _c +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_c *AccountCreate) AddGroups(v ...*Group) *AccountCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddGroupIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_c *AccountCreate) Mutation() *AccountMutation { + return _c.mutation +} + +// Save creates the Account in the database. +func (_c *AccountCreate) Save(ctx context.Context) (*Account, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AccountCreate) SaveX(ctx context.Context) *Account { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AccountCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_c *AccountCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if account.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized account.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := account.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if account.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Credentials(); !ok { + if account.DefaultCredentials == nil { + return fmt.Errorf("ent: uninitialized account.DefaultCredentials (forgotten import ent/runtime?)") + } + v := account.DefaultCredentials() + _c.mutation.SetCredentials(v) + } + if _, ok := _c.mutation.Extra(); !ok { + if account.DefaultExtra == nil { + return fmt.Errorf("ent: uninitialized account.DefaultExtra (forgotten import ent/runtime?)") + } + v := account.DefaultExtra() + _c.mutation.SetExtra(v) + } + if _, ok := _c.mutation.Concurrency(); !ok { + v := account.DefaultConcurrency + _c.mutation.SetConcurrency(v) + } + if _, ok := _c.mutation.Priority(); !ok { + v := account.DefaultPriority + _c.mutation.SetPriority(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := account.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.Schedulable(); !ok { + v := account.DefaultSchedulable + _c.mutation.SetSchedulable(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
func (_c *AccountCreate) check() error {
	if _, ok := _c.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Account.created_at"`)}
	}
	if _, ok := _c.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Account.updated_at"`)}
	}
	if _, ok := _c.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Account.name"`)}
	}
	if v, ok := _c.mutation.Name(); ok {
		if err := account.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Platform(); !ok {
		return &ValidationError{Name: "platform", err: errors.New(`ent: missing required field "Account.platform"`)}
	}
	if v, ok := _c.mutation.Platform(); ok {
		if err := account.PlatformValidator(v); err != nil {
			return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)}
		}
	}
	if _, ok := _c.mutation.GetType(); !ok {
		return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "Account.type"`)}
	}
	if v, ok := _c.mutation.GetType(); ok {
		if err := account.TypeValidator(v); err != nil {
			return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Credentials(); !ok {
		return &ValidationError{Name: "credentials", err: errors.New(`ent: missing required field "Account.credentials"`)}
	}
	if _, ok := _c.mutation.Extra(); !ok {
		return &ValidationError{Name: "extra", err: errors.New(`ent: missing required field "Account.extra"`)}
	}
	if _, ok := _c.mutation.Concurrency(); !ok {
		return &ValidationError{Name: "concurrency", err: errors.New(`ent: missing required field "Account.concurrency"`)}
	}
	if _, ok := _c.mutation.Priority(); !ok {
		return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "Account.priority"`)}
	}
	if _, ok := _c.mutation.Status(); !ok {
		return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Account.status"`)}
	}
	if v, ok := _c.mutation.Status(); ok {
		if err := account.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Schedulable(); !ok {
		return &ValidationError{Name: "schedulable", err: errors.New(`ent: missing required field "Account.schedulable"`)}
	}
	// session_window_status is optional, so only the validator runs — no
	// missing-field check.
	if v, ok := _c.mutation.SessionWindowStatus(); ok {
		if err := account.SessionWindowStatusValidator(v); err != nil {
			return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)}
		}
	}
	return nil
}

// sqlSave validates the builder, executes the INSERT, and wraps constraint
// violations in *ConstraintError. The generated auto-increment ID is read
// back from the create spec.
func (_c *AccountCreate) sqlSave(ctx context.Context) (*Account, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}

// createSpec translates the mutation into a sqlgraph.CreateSpec and a
// partially-populated *Account node. Only fields actually set on the
// mutation are copied; nillable columns are stored as pointers on the node.
func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) {
	var (
		_node = &Account{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.CreatedAt(); ok {
		_spec.SetField(account.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := _c.mutation.UpdatedAt(); ok {
		_spec.SetField(account.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := _c.mutation.DeletedAt(); ok {
		_spec.SetField(account.FieldDeletedAt, field.TypeTime, value)
		_node.DeletedAt = &value
	}
	if value, ok := _c.mutation.Name(); ok {
		_spec.SetField(account.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := _c.mutation.Platform(); ok {
		_spec.SetField(account.FieldPlatform, field.TypeString, value)
		_node.Platform = value
	}
	if value, ok := _c.mutation.GetType(); ok {
		_spec.SetField(account.FieldType, field.TypeString, value)
		_node.Type = value
	}
	if value, ok := _c.mutation.Credentials(); ok {
		_spec.SetField(account.FieldCredentials, field.TypeJSON, value)
		_node.Credentials = value
	}
	if value, ok := _c.mutation.Extra(); ok {
		_spec.SetField(account.FieldExtra, field.TypeJSON, value)
		_node.Extra = value
	}
	if value, ok := _c.mutation.ProxyID(); ok {
		_spec.SetField(account.FieldProxyID, field.TypeInt64, value)
		_node.ProxyID = &value
	}
	if value, ok := _c.mutation.Concurrency(); ok {
		_spec.SetField(account.FieldConcurrency, field.TypeInt, value)
		_node.Concurrency = value
	}
	if value, ok := _c.mutation.Priority(); ok {
		_spec.SetField(account.FieldPriority, field.TypeInt, value)
		_node.Priority = value
	}
	if value, ok := _c.mutation.Status(); ok {
		_spec.SetField(account.FieldStatus, field.TypeString, value)
		_node.Status = value
	}
	if value, ok := _c.mutation.ErrorMessage(); ok {
		_spec.SetField(account.FieldErrorMessage, field.TypeString, value)
		_node.ErrorMessage = &value
	}
	if value, ok := _c.mutation.LastUsedAt(); ok {
		_spec.SetField(account.FieldLastUsedAt, field.TypeTime, value)
		_node.LastUsedAt = &value
	}
	if value, ok := _c.mutation.Schedulable(); ok {
		_spec.SetField(account.FieldSchedulable, field.TypeBool, value)
		_node.Schedulable = value
	}
	if value, ok := _c.mutation.RateLimitedAt(); ok {
		_spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value)
		_node.RateLimitedAt = &value
	}
	if value, ok := _c.mutation.RateLimitResetAt(); ok {
		_spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value)
		_node.RateLimitResetAt = &value
	}
	if value, ok := _c.mutation.OverloadUntil(); ok {
		_spec.SetField(account.FieldOverloadUntil, field.TypeTime, value)
		_node.OverloadUntil = &value
	}
	if value, ok := _c.mutation.SessionWindowStart(); ok {
		_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
		_node.SessionWindowStart = &value
	}
	if value, ok := _c.mutation.SessionWindowEnd(); ok {
		_spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value)
		_node.SessionWindowEnd = &value
	}
	if value, ok := _c.mutation.SessionWindowStatus(); ok {
		_spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value)
		_node.SessionWindowStatus = &value
	}
	if nodes := _c.mutation.GroupsIDs(); len(nodes) > 0 {
		// M2M edge to groups through the account_groups join table; the
		// join-row defaults come from the AccountGroup builder's spec.
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: false,
			Table:   account.GroupsTable,
			Columns: account.GroupsPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		createE := &AccountGroupCreate{config: _c.config, mutation: newAccountGroupMutation(_c.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.Account.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.AccountUpsert) {
//			SetCreatedAt(v+v).
//		}).
//	Exec(ctx)
func (_c *AccountCreate) OnConflict(opts ...sql.ConflictOption) *AccountUpsertOne {
	_c.conflict = opts
	return &AccountUpsertOne{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.Account.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *AccountCreate) OnConflictColumns(columns ...string) *AccountUpsertOne {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &AccountUpsertOne{
		create: _c,
	}
}

type (
	// AccountUpsertOne is the builder for "upsert"-ing
	// one Account node.
	AccountUpsertOne struct {
		create *AccountCreate
	}

	// AccountUpsert is the "OnConflict" setter.
	// Set* writes a literal value, Update* copies the value proposed on
	// create (via SetExcluded), and Clear* writes NULL.
	AccountUpsert struct {
		*sql.UpdateSet
	}
)

// SetUpdatedAt sets the "updated_at" field.
func (u *AccountUpsert) SetUpdatedAt(v time.Time) *AccountUpsert {
	u.Set(account.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *AccountUpsert) UpdateUpdatedAt() *AccountUpsert {
	u.SetExcluded(account.FieldUpdatedAt)
	return u
}

// SetDeletedAt sets the "deleted_at" field.
func (u *AccountUpsert) SetDeletedAt(v time.Time) *AccountUpsert {
	u.Set(account.FieldDeletedAt, v)
	return u
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *AccountUpsert) UpdateDeletedAt() *AccountUpsert {
	u.SetExcluded(account.FieldDeletedAt)
	return u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *AccountUpsert) ClearDeletedAt() *AccountUpsert {
	u.SetNull(account.FieldDeletedAt)
	return u
}

// SetName sets the "name" field.
func (u *AccountUpsert) SetName(v string) *AccountUpsert {
	u.Set(account.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
// Per-column AccountUpsert setters, one triple (Set/Update[/Clear]) per
// mutable Account column.
func (u *AccountUpsert) UpdateName() *AccountUpsert {
	u.SetExcluded(account.FieldName)
	return u
}

// SetPlatform sets the "platform" field.
func (u *AccountUpsert) SetPlatform(v string) *AccountUpsert {
	u.Set(account.FieldPlatform, v)
	return u
}

// UpdatePlatform sets the "platform" field to the value that was provided on create.
func (u *AccountUpsert) UpdatePlatform() *AccountUpsert {
	u.SetExcluded(account.FieldPlatform)
	return u
}

// SetType sets the "type" field.
func (u *AccountUpsert) SetType(v string) *AccountUpsert {
	u.Set(account.FieldType, v)
	return u
}

// UpdateType sets the "type" field to the value that was provided on create.
func (u *AccountUpsert) UpdateType() *AccountUpsert {
	u.SetExcluded(account.FieldType)
	return u
}

// SetCredentials sets the "credentials" field.
func (u *AccountUpsert) SetCredentials(v map[string]interface{}) *AccountUpsert {
	u.Set(account.FieldCredentials, v)
	return u
}

// UpdateCredentials sets the "credentials" field to the value that was provided on create.
func (u *AccountUpsert) UpdateCredentials() *AccountUpsert {
	u.SetExcluded(account.FieldCredentials)
	return u
}

// SetExtra sets the "extra" field.
func (u *AccountUpsert) SetExtra(v map[string]interface{}) *AccountUpsert {
	u.Set(account.FieldExtra, v)
	return u
}

// UpdateExtra sets the "extra" field to the value that was provided on create.
func (u *AccountUpsert) UpdateExtra() *AccountUpsert {
	u.SetExcluded(account.FieldExtra)
	return u
}

// SetProxyID sets the "proxy_id" field.
func (u *AccountUpsert) SetProxyID(v int64) *AccountUpsert {
	u.Set(account.FieldProxyID, v)
	return u
}

// UpdateProxyID sets the "proxy_id" field to the value that was provided on create.
func (u *AccountUpsert) UpdateProxyID() *AccountUpsert {
	u.SetExcluded(account.FieldProxyID)
	return u
}

// AddProxyID adds v to the "proxy_id" field.
// NOTE(review): an arithmetic Add on an ID-like column is unusual but is
// what ent emits for any numeric field without schema restrictions.
func (u *AccountUpsert) AddProxyID(v int64) *AccountUpsert {
	u.Add(account.FieldProxyID, v)
	return u
}

// ClearProxyID clears the value of the "proxy_id" field.
func (u *AccountUpsert) ClearProxyID() *AccountUpsert {
	u.SetNull(account.FieldProxyID)
	return u
}

// SetConcurrency sets the "concurrency" field.
func (u *AccountUpsert) SetConcurrency(v int) *AccountUpsert {
	u.Set(account.FieldConcurrency, v)
	return u
}

// UpdateConcurrency sets the "concurrency" field to the value that was provided on create.
func (u *AccountUpsert) UpdateConcurrency() *AccountUpsert {
	u.SetExcluded(account.FieldConcurrency)
	return u
}

// AddConcurrency adds v to the "concurrency" field.
func (u *AccountUpsert) AddConcurrency(v int) *AccountUpsert {
	u.Add(account.FieldConcurrency, v)
	return u
}

// SetPriority sets the "priority" field.
func (u *AccountUpsert) SetPriority(v int) *AccountUpsert {
	u.Set(account.FieldPriority, v)
	return u
}

// UpdatePriority sets the "priority" field to the value that was provided on create.
func (u *AccountUpsert) UpdatePriority() *AccountUpsert {
	u.SetExcluded(account.FieldPriority)
	return u
}

// AddPriority adds v to the "priority" field.
func (u *AccountUpsert) AddPriority(v int) *AccountUpsert {
	u.Add(account.FieldPriority, v)
	return u
}

// SetStatus sets the "status" field.
func (u *AccountUpsert) SetStatus(v string) *AccountUpsert {
	u.Set(account.FieldStatus, v)
	return u
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *AccountUpsert) UpdateStatus() *AccountUpsert {
	u.SetExcluded(account.FieldStatus)
	return u
}

// SetErrorMessage sets the "error_message" field.
func (u *AccountUpsert) SetErrorMessage(v string) *AccountUpsert {
	u.Set(account.FieldErrorMessage, v)
	return u
}

// UpdateErrorMessage sets the "error_message" field to the value that was provided on create.
// AccountUpsert setters for the nullable status/telemetry columns.
func (u *AccountUpsert) UpdateErrorMessage() *AccountUpsert {
	u.SetExcluded(account.FieldErrorMessage)
	return u
}

// ClearErrorMessage clears the value of the "error_message" field.
func (u *AccountUpsert) ClearErrorMessage() *AccountUpsert {
	u.SetNull(account.FieldErrorMessage)
	return u
}

// SetLastUsedAt sets the "last_used_at" field.
func (u *AccountUpsert) SetLastUsedAt(v time.Time) *AccountUpsert {
	u.Set(account.FieldLastUsedAt, v)
	return u
}

// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create.
func (u *AccountUpsert) UpdateLastUsedAt() *AccountUpsert {
	u.SetExcluded(account.FieldLastUsedAt)
	return u
}

// ClearLastUsedAt clears the value of the "last_used_at" field.
func (u *AccountUpsert) ClearLastUsedAt() *AccountUpsert {
	u.SetNull(account.FieldLastUsedAt)
	return u
}

// SetSchedulable sets the "schedulable" field.
func (u *AccountUpsert) SetSchedulable(v bool) *AccountUpsert {
	u.Set(account.FieldSchedulable, v)
	return u
}

// UpdateSchedulable sets the "schedulable" field to the value that was provided on create.
func (u *AccountUpsert) UpdateSchedulable() *AccountUpsert {
	u.SetExcluded(account.FieldSchedulable)
	return u
}

// SetRateLimitedAt sets the "rate_limited_at" field.
func (u *AccountUpsert) SetRateLimitedAt(v time.Time) *AccountUpsert {
	u.Set(account.FieldRateLimitedAt, v)
	return u
}

// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create.
func (u *AccountUpsert) UpdateRateLimitedAt() *AccountUpsert {
	u.SetExcluded(account.FieldRateLimitedAt)
	return u
}

// ClearRateLimitedAt clears the value of the "rate_limited_at" field.
func (u *AccountUpsert) ClearRateLimitedAt() *AccountUpsert {
	u.SetNull(account.FieldRateLimitedAt)
	return u
}

// SetRateLimitResetAt sets the "rate_limit_reset_at" field.
// AccountUpsert setters for the rate-limit / overload / session-window
// timestamp columns (all nullable, hence the Clear* variants).
func (u *AccountUpsert) SetRateLimitResetAt(v time.Time) *AccountUpsert {
	u.Set(account.FieldRateLimitResetAt, v)
	return u
}

// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create.
func (u *AccountUpsert) UpdateRateLimitResetAt() *AccountUpsert {
	u.SetExcluded(account.FieldRateLimitResetAt)
	return u
}

// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field.
func (u *AccountUpsert) ClearRateLimitResetAt() *AccountUpsert {
	u.SetNull(account.FieldRateLimitResetAt)
	return u
}

// SetOverloadUntil sets the "overload_until" field.
func (u *AccountUpsert) SetOverloadUntil(v time.Time) *AccountUpsert {
	u.Set(account.FieldOverloadUntil, v)
	return u
}

// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create.
func (u *AccountUpsert) UpdateOverloadUntil() *AccountUpsert {
	u.SetExcluded(account.FieldOverloadUntil)
	return u
}

// ClearOverloadUntil clears the value of the "overload_until" field.
func (u *AccountUpsert) ClearOverloadUntil() *AccountUpsert {
	u.SetNull(account.FieldOverloadUntil)
	return u
}

// SetSessionWindowStart sets the "session_window_start" field.
func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert {
	u.Set(account.FieldSessionWindowStart, v)
	return u
}

// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create.
func (u *AccountUpsert) UpdateSessionWindowStart() *AccountUpsert {
	u.SetExcluded(account.FieldSessionWindowStart)
	return u
}

// ClearSessionWindowStart clears the value of the "session_window_start" field.
func (u *AccountUpsert) ClearSessionWindowStart() *AccountUpsert {
	u.SetNull(account.FieldSessionWindowStart)
	return u
}

// SetSessionWindowEnd sets the "session_window_end" field.
func (u *AccountUpsert) SetSessionWindowEnd(v time.Time) *AccountUpsert {
	u.Set(account.FieldSessionWindowEnd, v)
	return u
}

// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create.
func (u *AccountUpsert) UpdateSessionWindowEnd() *AccountUpsert {
	u.SetExcluded(account.FieldSessionWindowEnd)
	return u
}

// ClearSessionWindowEnd clears the value of the "session_window_end" field.
func (u *AccountUpsert) ClearSessionWindowEnd() *AccountUpsert {
	u.SetNull(account.FieldSessionWindowEnd)
	return u
}

// SetSessionWindowStatus sets the "session_window_status" field.
func (u *AccountUpsert) SetSessionWindowStatus(v string) *AccountUpsert {
	u.Set(account.FieldSessionWindowStatus, v)
	return u
}

// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create.
func (u *AccountUpsert) UpdateSessionWindowStatus() *AccountUpsert {
	u.SetExcluded(account.FieldSessionWindowStatus)
	return u
}

// ClearSessionWindowStatus clears the value of the "session_window_status" field.
func (u *AccountUpsert) ClearSessionWindowStatus() *AccountUpsert {
	u.SetNull(account.FieldSessionWindowStatus)
	return u
}

// UpdateNewValues updates the mutable fields using the new values that were set on create.
// The immutable created_at column is explicitly excluded from the resolution.
// Using this option is equivalent to using:
//
//	client.Account.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *AccountUpsertOne) UpdateNewValues() *AccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(account.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.Account.Create().
//	OnConflict(sql.ResolveWithIgnore()).
//	Exec(ctx)
func (u *AccountUpsertOne) Ignore() *AccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *AccountUpsertOne) DoNothing() *AccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the AccountCreate.OnConflict
// documentation for more info. All the typed UpsertOne setters below are
// thin wrappers over this method.
func (u *AccountUpsertOne) Update(set func(*AccountUpsert)) *AccountUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&AccountUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdatedAt sets the "updated_at" field.
func (u *AccountUpsertOne) SetUpdatedAt(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateUpdatedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetDeletedAt sets the "deleted_at" field.
func (u *AccountUpsertOne) SetDeletedAt(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetDeletedAt(v)
	})
}

// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateDeletedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateDeletedAt()
	})
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (u *AccountUpsertOne) ClearDeletedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearDeletedAt()
	})
}

// SetName sets the "name" field.
// Typed AccountUpsertOne wrappers; each delegates to the matching
// AccountUpsert setter via Update.
func (u *AccountUpsertOne) SetName(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateName() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateName()
	})
}

// SetPlatform sets the "platform" field.
func (u *AccountUpsertOne) SetPlatform(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetPlatform(v)
	})
}

// UpdatePlatform sets the "platform" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdatePlatform() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdatePlatform()
	})
}

// SetType sets the "type" field.
func (u *AccountUpsertOne) SetType(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetType(v)
	})
}

// UpdateType sets the "type" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateType() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateType()
	})
}

// SetCredentials sets the "credentials" field.
func (u *AccountUpsertOne) SetCredentials(v map[string]interface{}) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetCredentials(v)
	})
}

// UpdateCredentials sets the "credentials" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateCredentials() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateCredentials()
	})
}

// SetExtra sets the "extra" field.
func (u *AccountUpsertOne) SetExtra(v map[string]interface{}) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetExtra(v)
	})
}

// UpdateExtra sets the "extra" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateExtra() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateExtra()
	})
}

// SetProxyID sets the "proxy_id" field.
func (u *AccountUpsertOne) SetProxyID(v int64) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetProxyID(v)
	})
}

// AddProxyID adds v to the "proxy_id" field.
func (u *AccountUpsertOne) AddProxyID(v int64) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.AddProxyID(v)
	})
}

// UpdateProxyID sets the "proxy_id" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateProxyID() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateProxyID()
	})
}

// ClearProxyID clears the value of the "proxy_id" field.
func (u *AccountUpsertOne) ClearProxyID() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearProxyID()
	})
}

// SetConcurrency sets the "concurrency" field.
func (u *AccountUpsertOne) SetConcurrency(v int) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetConcurrency(v)
	})
}

// AddConcurrency adds v to the "concurrency" field.
func (u *AccountUpsertOne) AddConcurrency(v int) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.AddConcurrency(v)
	})
}

// UpdateConcurrency sets the "concurrency" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateConcurrency() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateConcurrency()
	})
}

// SetPriority sets the "priority" field.
func (u *AccountUpsertOne) SetPriority(v int) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetPriority(v)
	})
}

// AddPriority adds v to the "priority" field.
func (u *AccountUpsertOne) AddPriority(v int) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.AddPriority(v)
	})
}

// UpdatePriority sets the "priority" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdatePriority() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdatePriority()
	})
}

// SetStatus sets the "status" field.
func (u *AccountUpsertOne) SetStatus(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetStatus(v)
	})
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateStatus() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateStatus()
	})
}

// SetErrorMessage sets the "error_message" field.
func (u *AccountUpsertOne) SetErrorMessage(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetErrorMessage(v)
	})
}

// UpdateErrorMessage sets the "error_message" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateErrorMessage() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateErrorMessage()
	})
}

// ClearErrorMessage clears the value of the "error_message" field.
func (u *AccountUpsertOne) ClearErrorMessage() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearErrorMessage()
	})
}

// SetLastUsedAt sets the "last_used_at" field.
func (u *AccountUpsertOne) SetLastUsedAt(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetLastUsedAt(v)
	})
}

// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateLastUsedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateLastUsedAt()
	})
}

// ClearLastUsedAt clears the value of the "last_used_at" field.
func (u *AccountUpsertOne) ClearLastUsedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearLastUsedAt()
	})
}

// SetSchedulable sets the "schedulable" field.
func (u *AccountUpsertOne) SetSchedulable(v bool) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetSchedulable(v)
	})
}

// UpdateSchedulable sets the "schedulable" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateSchedulable() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateSchedulable()
	})
}

// SetRateLimitedAt sets the "rate_limited_at" field.
func (u *AccountUpsertOne) SetRateLimitedAt(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetRateLimitedAt(v)
	})
}

// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateRateLimitedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateRateLimitedAt()
	})
}

// ClearRateLimitedAt clears the value of the "rate_limited_at" field.
func (u *AccountUpsertOne) ClearRateLimitedAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearRateLimitedAt()
	})
}

// SetRateLimitResetAt sets the "rate_limit_reset_at" field.
func (u *AccountUpsertOne) SetRateLimitResetAt(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetRateLimitResetAt(v)
	})
}

// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateRateLimitResetAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateRateLimitResetAt()
	})
}

// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field.
func (u *AccountUpsertOne) ClearRateLimitResetAt() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearRateLimitResetAt()
	})
}

// SetOverloadUntil sets the "overload_until" field.
func (u *AccountUpsertOne) SetOverloadUntil(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetOverloadUntil(v)
	})
}

// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateOverloadUntil() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateOverloadUntil()
	})
}

// ClearOverloadUntil clears the value of the "overload_until" field.
func (u *AccountUpsertOne) ClearOverloadUntil() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearOverloadUntil()
	})
}

// SetSessionWindowStart sets the "session_window_start" field.
func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetSessionWindowStart(v)
	})
}

// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateSessionWindowStart() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateSessionWindowStart()
	})
}

// ClearSessionWindowStart clears the value of the "session_window_start" field.
func (u *AccountUpsertOne) ClearSessionWindowStart() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearSessionWindowStart()
	})
}

// SetSessionWindowEnd sets the "session_window_end" field.
func (u *AccountUpsertOne) SetSessionWindowEnd(v time.Time) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetSessionWindowEnd(v)
	})
}

// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateSessionWindowEnd() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateSessionWindowEnd()
	})
}

// ClearSessionWindowEnd clears the value of the "session_window_end" field.
func (u *AccountUpsertOne) ClearSessionWindowEnd() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearSessionWindowEnd()
	})
}

// SetSessionWindowStatus sets the "session_window_status" field.
func (u *AccountUpsertOne) SetSessionWindowStatus(v string) *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.SetSessionWindowStatus(v)
	})
}

// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create.
func (u *AccountUpsertOne) UpdateSessionWindowStatus() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.UpdateSessionWindowStatus()
	})
}

// ClearSessionWindowStatus clears the value of the "session_window_status" field.
func (u *AccountUpsertOne) ClearSessionWindowStatus() *AccountUpsertOne {
	return u.Update(func(s *AccountUpsert) {
		s.ClearSessionWindowStatus()
	})
}

// Exec executes the query. It fails fast if no conflict options were
// configured, since an upsert without them is meaningless.
func (u *AccountUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for AccountCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
// NOTE(review): this calls u.create.Exec directly, bypassing the
// missing-options guard above — confirm against the ent generator output.
func (u *AccountUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *AccountUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
func (u *AccountUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// AccountCreateBulk is the builder for creating many Account entities in bulk.
type AccountCreateBulk struct {
	config
	err      error
	builders []*AccountCreate
	conflict []sql.ConflictOption
}

// Save creates the Account entities in the database.
// Each builder's hooks run in a chained mutator; the last mutator in the
// chain performs the single batched INSERT for all rows.
func (_c *AccountCreateBulk) Save(ctx context.Context) ([]*Account, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*Account, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			// NOTE(review): defaults() returns an error that is discarded
			// here — confirm this matches the current generator output.
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*AccountMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// SaveX is like Save, but panics if an error occurs.
func (_c *AccountCreateBulk) SaveX(ctx context.Context) []*Account {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *AccountCreateBulk) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *AccountCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.Account.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.AccountUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *AccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountUpsertBulk {
	_c.conflict = opts
	return &AccountUpsertBulk{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.Account.Create().
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AccountCreateBulk) OnConflictColumns(columns ...string) *AccountUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AccountUpsertBulk{ + create: _c, + } +} + +// AccountUpsertBulk is the builder for "upsert"-ing +// a bulk of Account nodes. +type AccountUpsertBulk struct { + create *AccountCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountUpsertBulk) UpdateNewValues() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(account.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountUpsertBulk) Ignore() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountUpsertBulk) DoNothing() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountCreateBulk.OnConflict +// documentation for more info. 
+func (u *AccountUpsertBulk) Update(set func(*AccountUpsert)) *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AccountUpsertBulk) SetUpdatedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateUpdatedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *AccountUpsertBulk) SetDeletedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateDeletedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *AccountUpsertBulk) ClearDeletedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *AccountUpsertBulk) SetName(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateName() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateName() + }) +} + +// SetPlatform sets the "platform" field. +func (u *AccountUpsertBulk) SetPlatform(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdatePlatform() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdatePlatform() + }) +} + +// SetType sets the "type" field. +func (u *AccountUpsertBulk) SetType(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateType() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateType() + }) +} + +// SetCredentials sets the "credentials" field. +func (u *AccountUpsertBulk) SetCredentials(v map[string]interface{}) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetCredentials(v) + }) +} + +// UpdateCredentials sets the "credentials" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateCredentials() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateCredentials() + }) +} + +// SetExtra sets the "extra" field. +func (u *AccountUpsertBulk) SetExtra(v map[string]interface{}) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetExtra(v) + }) +} + +// UpdateExtra sets the "extra" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateExtra() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateExtra() + }) +} + +// SetProxyID sets the "proxy_id" field. +func (u *AccountUpsertBulk) SetProxyID(v int64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetProxyID(v) + }) +} + +// AddProxyID adds v to the "proxy_id" field. +func (u *AccountUpsertBulk) AddProxyID(v int64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddProxyID(v) + }) +} + +// UpdateProxyID sets the "proxy_id" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateProxyID() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateProxyID() + }) +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (u *AccountUpsertBulk) ClearProxyID() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearProxyID() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *AccountUpsertBulk) SetConcurrency(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *AccountUpsertBulk) AddConcurrency(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateConcurrency() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateConcurrency() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountUpsertBulk) SetPriority(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountUpsertBulk) AddPriority(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdatePriority() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdatePriority() + }) +} + +// SetStatus sets the "status" field. +func (u *AccountUpsertBulk) SetStatus(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateStatus() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *AccountUpsertBulk) SetErrorMessage(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateErrorMessage() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *AccountUpsertBulk) ClearErrorMessage() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearErrorMessage() + }) +} + +// SetLastUsedAt sets the "last_used_at" field. +func (u *AccountUpsertBulk) SetLastUsedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetLastUsedAt(v) + }) +} + +// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateLastUsedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateLastUsedAt() + }) +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (u *AccountUpsertBulk) ClearLastUsedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearLastUsedAt() + }) +} + +// SetSchedulable sets the "schedulable" field. +func (u *AccountUpsertBulk) SetSchedulable(v bool) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSchedulable(v) + }) +} + +// UpdateSchedulable sets the "schedulable" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSchedulable() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSchedulable() + }) +} + +// SetRateLimitedAt sets the "rate_limited_at" field. 
+func (u *AccountUpsertBulk) SetRateLimitedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitedAt(v) + }) +} + +// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateLimitedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitedAt() + }) +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (u *AccountUpsertBulk) ClearRateLimitedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearRateLimitedAt() + }) +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (u *AccountUpsertBulk) SetRateLimitResetAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitResetAt(v) + }) +} + +// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateLimitResetAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitResetAt() + }) +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (u *AccountUpsertBulk) ClearRateLimitResetAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearRateLimitResetAt() + }) +} + +// SetOverloadUntil sets the "overload_until" field. +func (u *AccountUpsertBulk) SetOverloadUntil(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetOverloadUntil(v) + }) +} + +// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateOverloadUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateOverloadUntil() + }) +} + +// ClearOverloadUntil clears the value of the "overload_until" field. 
+func (u *AccountUpsertBulk) ClearOverloadUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearOverloadUntil() + }) +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowStart(v) + }) +} + +// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSessionWindowStart() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowStart() + }) +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (u *AccountUpsertBulk) ClearSessionWindowStart() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowStart() + }) +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (u *AccountUpsertBulk) SetSessionWindowEnd(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowEnd(v) + }) +} + +// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSessionWindowEnd() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowEnd() + }) +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (u *AccountUpsertBulk) ClearSessionWindowEnd() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowEnd() + }) +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (u *AccountUpsertBulk) SetSessionWindowStatus(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowStatus(v) + }) +} + +// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateSessionWindowStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowStatus() + }) +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (u *AccountUpsertBulk) ClearSessionWindowStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowStatus() + }) +} + +// Exec executes the query. +func (u *AccountUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AccountCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AccountCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AccountUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/account_delete.go b/backend/ent/account_delete.go new file mode 100644 index 00000000..44cf2f55 --- /dev/null +++ b/backend/ent/account_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountDelete is the builder for deleting a Account entity. +type AccountDelete struct { + config + hooks []Hook + mutation *AccountMutation +} + +// Where appends a list predicates to the AccountDelete builder. +func (_d *AccountDelete) Where(ps ...predicate.Account) *AccountDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (_d *AccountDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AccountDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AccountDeleteOne is the builder for deleting a single Account entity. +type AccountDeleteOne struct { + _d *AccountDelete +} + +// Where appends a list predicates to the AccountDelete builder. +func (_d *AccountDeleteOne) Where(ps ...predicate.Account) *AccountDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AccountDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{account.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/account_query.go b/backend/ent/account_query.go new file mode 100644 index 00000000..e5712884 --- /dev/null +++ b/backend/ent/account_query.go @@ -0,0 +1,711 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountQuery is the builder for querying Account entities. +type AccountQuery struct { + config + ctx *QueryContext + order []account.OrderOption + inters []Interceptor + predicates []predicate.Account + withGroups *GroupQuery + withAccountGroups *AccountGroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AccountQuery builder. +func (_q *AccountQuery) Where(ps ...predicate.Account) *AccountQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AccountQuery) Limit(limit int) *AccountQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AccountQuery) Offset(offset int) *AccountQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AccountQuery) Unique(unique bool) *AccountQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AccountQuery) Order(o ...account.OrderOption) *AccountQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryGroups chains the current query on the "groups" edge. 
+func (_q *AccountQuery) QueryGroups() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccountGroups chains the current query on the "account_groups" edge. +func (_q *AccountQuery) QueryAccountGroups() *AccountGroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn), + sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Account entity from the query. +// Returns a *NotFoundError when no Account was found. +func (_q *AccountQuery) First(ctx context.Context) (*Account, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{account.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *AccountQuery) FirstX(ctx context.Context) *Account { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Account ID from the query. +// Returns a *NotFoundError when no Account ID was found. +func (_q *AccountQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{account.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AccountQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Account entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Account entity is found. +// Returns a *NotFoundError when no Account entities are found. +func (_q *AccountQuery) Only(ctx context.Context) (*Account, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{account.Label} + default: + return nil, &NotSingularError{account.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AccountQuery) OnlyX(ctx context.Context) *Account { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Account ID in the query. +// Returns a *NotSingularError when more than one Account ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *AccountQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{account.Label} + default: + err = &NotSingularError{account.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *AccountQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Accounts. +func (_q *AccountQuery) All(ctx context.Context) ([]*Account, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Account, *AccountQuery]() + return withInterceptors[[]*Account](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AccountQuery) AllX(ctx context.Context) []*Account { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Account IDs. +func (_q *AccountQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(account.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AccountQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *AccountQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AccountQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AccountQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AccountQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AccountQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AccountQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AccountQuery) Clone() *AccountQuery { + if _q == nil { + return nil + } + return &AccountQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]account.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Account{}, _q.predicates...), + withGroups: _q.withGroups.Clone(), + withAccountGroups: _q.withAccountGroups.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithGroups tells the query-builder to eager-load the nodes that are connected to +// the "groups" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *AccountQuery) WithGroups(opts ...func(*GroupQuery)) *AccountQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroups = query + return _q +} + +// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to +// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *AccountQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccountGroups = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Account.Query(). +// GroupBy(account.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AccountQuery) GroupBy(field string, fields ...string) *AccountGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AccountGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = account.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Account.Query(). +// Select(account.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *AccountQuery) Select(fields ...string) *AccountSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &AccountSelect{AccountQuery: _q} + sbuild.label = account.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AccountSelect configured with the given aggregations. +func (_q *AccountQuery) Aggregate(fns ...AggregateFunc) *AccountSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *AccountQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !account.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Account, error) { + var ( + nodes = []*Account{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withGroups != nil, + _q.withAccountGroups != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Account).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Account{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withGroups; query != nil { + if err := _q.loadGroups(ctx, query, nodes, + func(n *Account) { n.Edges.Groups = []*Group{} }, + func(n *Account, e *Group) { n.Edges.Groups = append(n.Edges.Groups, e) }); err != nil { + return nil, err + } + } + if query := 
_q.withAccountGroups; query != nil { + if err := _q.loadAccountGroups(ctx, query, nodes, + func(n *Account) { n.Edges.AccountGroups = []*AccountGroup{} }, + func(n *Account, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AccountQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *Group)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*Account) + nids := make(map[int64]map[*Account]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(account.GroupsTable) + s.Join(joinT).On(s.C(group.FieldID), joinT.C(account.GroupsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(account.GroupsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(account.GroupsPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Account]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "groups" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *AccountGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Account) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(accountgroup.FieldAccountID) + } + query.Where(predicate.AccountGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(account.AccountGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AccountID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced 
foreign-key "account_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} + +func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AccountQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, account.FieldID) + for i := range fields { + if fields[i] != account.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(account.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = account.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AccountGroupBy is the group-by builder for Account entities. +type AccountGroupBy struct { + selector + build *AccountQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AccountGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AccountGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountQuery, *AccountGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AccountGroupBy) sqlScan(ctx context.Context, root *AccountQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AccountSelect is the builder for selecting fields of Account entities. +type AccountSelect struct { + *AccountQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AccountSelect) Aggregate(fns ...AggregateFunc) *AccountSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AccountSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountQuery, *AccountSelect](ctx, _s.AccountQuery, _s, _s.inters, v) +} + +func (_s *AccountSelect) sqlScan(ctx context.Context, root *AccountQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/account_update.go b/backend/ent/account_update.go new file mode 100644 index 00000000..49eaaea8 --- /dev/null +++ b/backend/ent/account_update.go @@ -0,0 +1,1331 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountUpdate is the builder for updating Account entities. +type AccountUpdate struct { + config + hooks []Hook + mutation *AccountMutation +} + +// Where appends a list predicates to the AccountUpdate builder. +func (_u *AccountUpdate) Where(ps ...predicate.Account) *AccountUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AccountUpdate) SetUpdatedAt(v time.Time) *AccountUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *AccountUpdate) SetDeletedAt(v time.Time) *AccountUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableDeletedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *AccountUpdate) ClearDeletedAt() *AccountUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *AccountUpdate) SetName(v string) *AccountUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableName(v *string) *AccountUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *AccountUpdate) SetPlatform(v string) *AccountUpdate { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. 
+func (_u *AccountUpdate) SetNillablePlatform(v *string) *AccountUpdate { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *AccountUpdate) SetType(v string) *AccountUpdate { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableType(v *string) *AccountUpdate { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetCredentials sets the "credentials" field. +func (_u *AccountUpdate) SetCredentials(v map[string]interface{}) *AccountUpdate { + _u.mutation.SetCredentials(v) + return _u +} + +// SetExtra sets the "extra" field. +func (_u *AccountUpdate) SetExtra(v map[string]interface{}) *AccountUpdate { + _u.mutation.SetExtra(v) + return _u +} + +// SetProxyID sets the "proxy_id" field. +func (_u *AccountUpdate) SetProxyID(v int64) *AccountUpdate { + _u.mutation.ResetProxyID() + _u.mutation.SetProxyID(v) + return _u +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableProxyID(v *int64) *AccountUpdate { + if v != nil { + _u.SetProxyID(*v) + } + return _u +} + +// AddProxyID adds value to the "proxy_id" field. +func (_u *AccountUpdate) AddProxyID(v int64) *AccountUpdate { + _u.mutation.AddProxyID(v) + return _u +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (_u *AccountUpdate) ClearProxyID() *AccountUpdate { + _u.mutation.ClearProxyID() + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *AccountUpdate) SetConcurrency(v int) *AccountUpdate { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableConcurrency(v *int) *AccountUpdate { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. 
+func (_u *AccountUpdate) AddConcurrency(v int) *AccountUpdate { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountUpdate) SetPriority(v int) *AccountUpdate { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountUpdate) SetNillablePriority(v *int) *AccountUpdate { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountUpdate) AddPriority(v int) *AccountUpdate { + _u.mutation.AddPriority(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *AccountUpdate) SetStatus(v string) *AccountUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableStatus(v *string) *AccountUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *AccountUpdate) SetErrorMessage(v string) *AccountUpdate { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableErrorMessage(v *string) *AccountUpdate { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *AccountUpdate) ClearErrorMessage() *AccountUpdate { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_u *AccountUpdate) SetLastUsedAt(v time.Time) *AccountUpdate { + _u.mutation.SetLastUsedAt(v) + return _u +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. 
+func (_u *AccountUpdate) SetNillableLastUsedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetLastUsedAt(*v) + } + return _u +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (_u *AccountUpdate) ClearLastUsedAt() *AccountUpdate { + _u.mutation.ClearLastUsedAt() + return _u +} + +// SetSchedulable sets the "schedulable" field. +func (_u *AccountUpdate) SetSchedulable(v bool) *AccountUpdate { + _u.mutation.SetSchedulable(v) + return _u +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSchedulable(v *bool) *AccountUpdate { + if v != nil { + _u.SetSchedulable(*v) + } + return _u +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_u *AccountUpdate) SetRateLimitedAt(v time.Time) *AccountUpdate { + _u.mutation.SetRateLimitedAt(v) + return _u +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateLimitedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetRateLimitedAt(*v) + } + return _u +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (_u *AccountUpdate) ClearRateLimitedAt() *AccountUpdate { + _u.mutation.ClearRateLimitedAt() + return _u +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_u *AccountUpdate) SetRateLimitResetAt(v time.Time) *AccountUpdate { + _u.mutation.SetRateLimitResetAt(v) + return _u +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateLimitResetAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetRateLimitResetAt(*v) + } + return _u +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. 
+func (_u *AccountUpdate) ClearRateLimitResetAt() *AccountUpdate { + _u.mutation.ClearRateLimitResetAt() + return _u +} + +// SetOverloadUntil sets the "overload_until" field. +func (_u *AccountUpdate) SetOverloadUntil(v time.Time) *AccountUpdate { + _u.mutation.SetOverloadUntil(v) + return _u +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableOverloadUntil(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetOverloadUntil(*v) + } + return _u +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (_u *AccountUpdate) ClearOverloadUntil() *AccountUpdate { + _u.mutation.ClearOverloadUntil() + return _u +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate { + _u.mutation.SetSessionWindowStart(v) + return _u +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowStart(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetSessionWindowStart(*v) + } + return _u +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (_u *AccountUpdate) ClearSessionWindowStart() *AccountUpdate { + _u.mutation.ClearSessionWindowStart() + return _u +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_u *AccountUpdate) SetSessionWindowEnd(v time.Time) *AccountUpdate { + _u.mutation.SetSessionWindowEnd(v) + return _u +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowEnd(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetSessionWindowEnd(*v) + } + return _u +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. 
+func (_u *AccountUpdate) ClearSessionWindowEnd() *AccountUpdate { + _u.mutation.ClearSessionWindowEnd() + return _u +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_u *AccountUpdate) SetSessionWindowStatus(v string) *AccountUpdate { + _u.mutation.SetSessionWindowStatus(v) + return _u +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowStatus(v *string) *AccountUpdate { + if v != nil { + _u.SetSessionWindowStatus(*v) + } + return _u +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (_u *AccountUpdate) ClearSessionWindowStatus() *AccountUpdate { + _u.mutation.ClearSessionWindowStatus() + return _u +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. +func (_u *AccountUpdate) AddGroupIDs(ids ...int64) *AccountUpdate { + _u.mutation.AddGroupIDs(ids...) + return _u +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_u *AccountUpdate) AddGroups(v ...*Group) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddGroupIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_u *AccountUpdate) Mutation() *AccountMutation { + return _u.mutation +} + +// ClearGroups clears all "groups" edges to the Group entity. +func (_u *AccountUpdate) ClearGroups() *AccountUpdate { + _u.mutation.ClearGroups() + return _u +} + +// RemoveGroupIDs removes the "groups" edge to Group entities by IDs. +func (_u *AccountUpdate) RemoveGroupIDs(ids ...int64) *AccountUpdate { + _u.mutation.RemoveGroupIDs(ids...) + return _u +} + +// RemoveGroups removes "groups" edges to Group entities. +func (_u *AccountUpdate) RemoveGroups(v ...*Group) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveGroupIDs(ids...) 
+} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AccountUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AccountUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AccountUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if account.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AccountUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := account.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := account.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := account.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := account.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)} + } + } + if v, ok := _u.mutation.SessionWindowStatus(); ok { + if err := account.SessionWindowStatusValidator(v); err != nil { + return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)} + } + } + return nil +} + +func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(account.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(account.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(account.FieldDeletedAt, field.TypeTime) + } + if value, ok := 
_u.mutation.Name(); ok { + _spec.SetField(account.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(account.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(account.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Credentials(); ok { + _spec.SetField(account.FieldCredentials, field.TypeJSON, value) + } + if value, ok := _u.mutation.Extra(); ok { + _spec.SetField(account.FieldExtra, field.TypeJSON, value) + } + if value, ok := _u.mutation.ProxyID(); ok { + _spec.SetField(account.FieldProxyID, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedProxyID(); ok { + _spec.AddField(account.FieldProxyID, field.TypeInt64, value) + } + if _u.mutation.ProxyIDCleared() { + _spec.ClearField(account.FieldProxyID, field.TypeInt64) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(account.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(account.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(account.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.LastUsedAt(); ok { + _spec.SetField(account.FieldLastUsedAt, field.TypeTime, value) + } + if _u.mutation.LastUsedAtCleared() { + _spec.ClearField(account.FieldLastUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Schedulable(); ok { + 
_spec.SetField(account.FieldSchedulable, field.TypeBool, value) + } + if value, ok := _u.mutation.RateLimitedAt(); ok { + _spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value) + } + if _u.mutation.RateLimitedAtCleared() { + _spec.ClearField(account.FieldRateLimitedAt, field.TypeTime) + } + if value, ok := _u.mutation.RateLimitResetAt(); ok { + _spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value) + } + if _u.mutation.RateLimitResetAtCleared() { + _spec.ClearField(account.FieldRateLimitResetAt, field.TypeTime) + } + if value, ok := _u.mutation.OverloadUntil(); ok { + _spec.SetField(account.FieldOverloadUntil, field.TypeTime, value) + } + if _u.mutation.OverloadUntilCleared() { + _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStart(); ok { + _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) + } + if _u.mutation.SessionWindowStartCleared() { + _spec.ClearField(account.FieldSessionWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowEnd(); ok { + _spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value) + } + if _u.mutation.SessionWindowEndCleared() { + _spec.ClearField(account.FieldSessionWindowEnd, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStatus(); ok { + _spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value) + } + if _u.mutation.SessionWindowStatusCleared() { + _spec.ClearField(account.FieldSessionWindowStatus, field.TypeString) + } + if _u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + 
edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{account.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AccountUpdateOne is the builder for updating a single Account entity. 
+type AccountUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AccountMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AccountUpdateOne) SetUpdatedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *AccountUpdateOne) SetDeletedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableDeletedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *AccountUpdateOne) ClearDeletedAt() *AccountUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *AccountUpdateOne) SetName(v string) *AccountUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableName(v *string) *AccountUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *AccountUpdateOne) SetPlatform(v string) *AccountUpdateOne { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillablePlatform(v *string) *AccountUpdateOne { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *AccountUpdateOne) SetType(v string) *AccountUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableType(v *string) *AccountUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetCredentials sets the "credentials" field. 
+func (_u *AccountUpdateOne) SetCredentials(v map[string]interface{}) *AccountUpdateOne { + _u.mutation.SetCredentials(v) + return _u +} + +// SetExtra sets the "extra" field. +func (_u *AccountUpdateOne) SetExtra(v map[string]interface{}) *AccountUpdateOne { + _u.mutation.SetExtra(v) + return _u +} + +// SetProxyID sets the "proxy_id" field. +func (_u *AccountUpdateOne) SetProxyID(v int64) *AccountUpdateOne { + _u.mutation.ResetProxyID() + _u.mutation.SetProxyID(v) + return _u +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableProxyID(v *int64) *AccountUpdateOne { + if v != nil { + _u.SetProxyID(*v) + } + return _u +} + +// AddProxyID adds value to the "proxy_id" field. +func (_u *AccountUpdateOne) AddProxyID(v int64) *AccountUpdateOne { + _u.mutation.AddProxyID(v) + return _u +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (_u *AccountUpdateOne) ClearProxyID() *AccountUpdateOne { + _u.mutation.ClearProxyID() + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *AccountUpdateOne) SetConcurrency(v int) *AccountUpdateOne { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableConcurrency(v *int) *AccountUpdateOne { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *AccountUpdateOne) AddConcurrency(v int) *AccountUpdateOne { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountUpdateOne) SetPriority(v int) *AccountUpdateOne { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillablePriority(v *int) *AccountUpdateOne { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountUpdateOne) AddPriority(v int) *AccountUpdateOne { + _u.mutation.AddPriority(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *AccountUpdateOne) SetStatus(v string) *AccountUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableStatus(v *string) *AccountUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *AccountUpdateOne) SetErrorMessage(v string) *AccountUpdateOne { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableErrorMessage(v *string) *AccountUpdateOne { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *AccountUpdateOne) ClearErrorMessage() *AccountUpdateOne { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_u *AccountUpdateOne) SetLastUsedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetLastUsedAt(v) + return _u +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableLastUsedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetLastUsedAt(*v) + } + return _u +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (_u *AccountUpdateOne) ClearLastUsedAt() *AccountUpdateOne { + _u.mutation.ClearLastUsedAt() + return _u +} + +// SetSchedulable sets the "schedulable" field. 
+func (_u *AccountUpdateOne) SetSchedulable(v bool) *AccountUpdateOne { + _u.mutation.SetSchedulable(v) + return _u +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSchedulable(v *bool) *AccountUpdateOne { + if v != nil { + _u.SetSchedulable(*v) + } + return _u +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_u *AccountUpdateOne) SetRateLimitedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetRateLimitedAt(v) + return _u +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateLimitedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetRateLimitedAt(*v) + } + return _u +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (_u *AccountUpdateOne) ClearRateLimitedAt() *AccountUpdateOne { + _u.mutation.ClearRateLimitedAt() + return _u +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_u *AccountUpdateOne) SetRateLimitResetAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetRateLimitResetAt(v) + return _u +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateLimitResetAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetRateLimitResetAt(*v) + } + return _u +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (_u *AccountUpdateOne) ClearRateLimitResetAt() *AccountUpdateOne { + _u.mutation.ClearRateLimitResetAt() + return _u +} + +// SetOverloadUntil sets the "overload_until" field. +func (_u *AccountUpdateOne) SetOverloadUntil(v time.Time) *AccountUpdateOne { + _u.mutation.SetOverloadUntil(v) + return _u +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillableOverloadUntil(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetOverloadUntil(*v) + } + return _u +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (_u *AccountUpdateOne) ClearOverloadUntil() *AccountUpdateOne { + _u.mutation.ClearOverloadUntil() + return _u +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne { + _u.mutation.SetSessionWindowStart(v) + return _u +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSessionWindowStart(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowStart(*v) + } + return _u +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (_u *AccountUpdateOne) ClearSessionWindowStart() *AccountUpdateOne { + _u.mutation.ClearSessionWindowStart() + return _u +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_u *AccountUpdateOne) SetSessionWindowEnd(v time.Time) *AccountUpdateOne { + _u.mutation.SetSessionWindowEnd(v) + return _u +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSessionWindowEnd(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowEnd(*v) + } + return _u +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (_u *AccountUpdateOne) ClearSessionWindowEnd() *AccountUpdateOne { + _u.mutation.ClearSessionWindowEnd() + return _u +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_u *AccountUpdateOne) SetSessionWindowStatus(v string) *AccountUpdateOne { + _u.mutation.SetSessionWindowStatus(v) + return _u +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillableSessionWindowStatus(v *string) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowStatus(*v) + } + return _u +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (_u *AccountUpdateOne) ClearSessionWindowStatus() *AccountUpdateOne { + _u.mutation.ClearSessionWindowStatus() + return _u +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. +func (_u *AccountUpdateOne) AddGroupIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.AddGroupIDs(ids...) + return _u +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_u *AccountUpdateOne) AddGroups(v ...*Group) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddGroupIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_u *AccountUpdateOne) Mutation() *AccountMutation { + return _u.mutation +} + +// ClearGroups clears all "groups" edges to the Group entity. +func (_u *AccountUpdateOne) ClearGroups() *AccountUpdateOne { + _u.mutation.ClearGroups() + return _u +} + +// RemoveGroupIDs removes the "groups" edge to Group entities by IDs. +func (_u *AccountUpdateOne) RemoveGroupIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.RemoveGroupIDs(ids...) + return _u +} + +// RemoveGroups removes "groups" edges to Group entities. +func (_u *AccountUpdateOne) RemoveGroups(v ...*Group) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveGroupIDs(ids...) +} + +// Where appends a list predicates to the AccountUpdate builder. +func (_u *AccountUpdateOne) Where(ps ...predicate.Account) *AccountUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. 
+func (_u *AccountUpdateOne) Select(field string, fields ...string) *AccountUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Account entity. +func (_u *AccountUpdateOne) Save(ctx context.Context) (*Account, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountUpdateOne) SaveX(ctx context.Context) *Account { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AccountUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AccountUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if account.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AccountUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := account.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := account.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := account.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := account.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)} + } + } + if v, ok := _u.mutation.SessionWindowStatus(); ok { + if err := account.SessionWindowStatusValidator(v); err != nil { + return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)} + } + } + return nil +} + +func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Account.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, account.FieldID) + for _, f := range fields { + if !account.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f 
!= account.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(account.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(account.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(account.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(account.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(account.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(account.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Credentials(); ok { + _spec.SetField(account.FieldCredentials, field.TypeJSON, value) + } + if value, ok := _u.mutation.Extra(); ok { + _spec.SetField(account.FieldExtra, field.TypeJSON, value) + } + if value, ok := _u.mutation.ProxyID(); ok { + _spec.SetField(account.FieldProxyID, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedProxyID(); ok { + _spec.AddField(account.FieldProxyID, field.TypeInt64, value) + } + if _u.mutation.ProxyIDCleared() { + _spec.ClearField(account.FieldProxyID, field.TypeInt64) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + 
_spec.SetField(account.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(account.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(account.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.LastUsedAt(); ok { + _spec.SetField(account.FieldLastUsedAt, field.TypeTime, value) + } + if _u.mutation.LastUsedAtCleared() { + _spec.ClearField(account.FieldLastUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Schedulable(); ok { + _spec.SetField(account.FieldSchedulable, field.TypeBool, value) + } + if value, ok := _u.mutation.RateLimitedAt(); ok { + _spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value) + } + if _u.mutation.RateLimitedAtCleared() { + _spec.ClearField(account.FieldRateLimitedAt, field.TypeTime) + } + if value, ok := _u.mutation.RateLimitResetAt(); ok { + _spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value) + } + if _u.mutation.RateLimitResetAtCleared() { + _spec.ClearField(account.FieldRateLimitResetAt, field.TypeTime) + } + if value, ok := _u.mutation.OverloadUntil(); ok { + _spec.SetField(account.FieldOverloadUntil, field.TypeTime, value) + } + if _u.mutation.OverloadUntilCleared() { + _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStart(); ok { + _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) + } + if _u.mutation.SessionWindowStartCleared() { + _spec.ClearField(account.FieldSessionWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowEnd(); ok { + _spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value) + } + if _u.mutation.SessionWindowEndCleared() { + _spec.ClearField(account.FieldSessionWindowEnd, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStatus(); ok { + _spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value) + } + if 
_u.mutation.SessionWindowStatusCleared() { + _spec.ClearField(account.FieldSessionWindowStatus, field.TypeString) + } + if _u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = 
append(_spec.Edges.Add, edge) + } + _node = &Account{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{account.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/accountgroup.go b/backend/ent/accountgroup.go new file mode 100644 index 00000000..71d8a1f9 --- /dev/null +++ b/backend/ent/accountgroup.go @@ -0,0 +1,176 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" +) + +// AccountGroup is the model entity for the AccountGroup schema. +type AccountGroup struct { + config `json:"-"` + // AccountID holds the value of the "account_id" field. + AccountID int64 `json:"account_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // Priority holds the value of the "priority" field. + Priority int `json:"priority,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AccountGroupQuery when eager-loading is set. + Edges AccountGroupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AccountGroupEdges holds the relations/edges for other nodes in the graph. +type AccountGroupEdges struct { + // Account holds the value of the account edge. + Account *Account `json:"account,omitempty"` + // Group holds the value of the group edge. 
+ Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// AccountOrErr returns the Account value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AccountGroupEdges) AccountOrErr() (*Account, error) { + if e.Account != nil { + return e.Account, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: account.Label} + } + return nil, &NotLoadedError{edge: "account"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AccountGroupEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AccountGroup) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case accountgroup.FieldAccountID, accountgroup.FieldGroupID, accountgroup.FieldPriority: + values[i] = new(sql.NullInt64) + case accountgroup.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AccountGroup fields. 
+func (_m *AccountGroup) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case accountgroup.FieldAccountID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field account_id", values[i]) + } else if value.Valid { + _m.AccountID = value.Int64 + } + case accountgroup.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case accountgroup.FieldPriority: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field priority", values[i]) + } else if value.Valid { + _m.Priority = int(value.Int64) + } + case accountgroup.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AccountGroup. +// This includes values selected through modifiers, order, etc. +func (_m *AccountGroup) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAccount queries the "account" edge of the AccountGroup entity. +func (_m *AccountGroup) QueryAccount() *AccountQuery { + return NewAccountGroupClient(_m.config).QueryAccount(_m) +} + +// QueryGroup queries the "group" edge of the AccountGroup entity. +func (_m *AccountGroup) QueryGroup() *GroupQuery { + return NewAccountGroupClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this AccountGroup. 
+// Note that you need to call AccountGroup.Unwrap() before calling this method if this AccountGroup +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *AccountGroup) Update() *AccountGroupUpdateOne { + return NewAccountGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the AccountGroup entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *AccountGroup) Unwrap() *AccountGroup { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: AccountGroup is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *AccountGroup) String() string { + var builder strings.Builder + builder.WriteString("AccountGroup(") + builder.WriteString("account_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AccountID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("priority=") + builder.WriteString(fmt.Sprintf("%v", _m.Priority)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// AccountGroups is a parsable slice of AccountGroup. +type AccountGroups []*AccountGroup diff --git a/backend/ent/accountgroup/accountgroup.go b/backend/ent/accountgroup/accountgroup.go new file mode 100644 index 00000000..5db485b6 --- /dev/null +++ b/backend/ent/accountgroup/accountgroup.go @@ -0,0 +1,123 @@ +// Code generated by ent, DO NOT EDIT. + +package accountgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the accountgroup type in the database. 
+ Label = "account_group" + // FieldAccountID holds the string denoting the account_id field in the database. + FieldAccountID = "account_id" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldPriority holds the string denoting the priority field in the database. + FieldPriority = "priority" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeAccount holds the string denoting the account edge name in mutations. + EdgeAccount = "account" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // AccountFieldID holds the string denoting the ID field of the Account. + AccountFieldID = "id" + // GroupFieldID holds the string denoting the ID field of the Group. + GroupFieldID = "id" + // Table holds the table name of the accountgroup in the database. + Table = "account_groups" + // AccountTable is the table that holds the account relation/edge. + AccountTable = "account_groups" + // AccountInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. + AccountInverseTable = "accounts" + // AccountColumn is the table column denoting the account relation/edge. + AccountColumn = "account_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "account_groups" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for accountgroup fields. 
+var Columns = []string{ + FieldAccountID, + FieldGroupID, + FieldPriority, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultPriority holds the default value on creation for the "priority" field. + DefaultPriority int + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AccountGroup queries. +type OrderOption func(*sql.Selector) + +// ByAccountID orders the results by the account_id field. +func ByAccountID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccountID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByPriority orders the results by the priority field. +func ByPriority(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPriority, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByAccountField orders the results by account field. +func ByAccountField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. 
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newAccountStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, AccountColumn), + sqlgraph.To(AccountInverseTable, AccountFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.To(GroupInverseTable, GroupFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/accountgroup/where.go b/backend/ent/accountgroup/where.go new file mode 100644 index 00000000..8226856b --- /dev/null +++ b/backend/ent/accountgroup/where.go @@ -0,0 +1,212 @@ +// Code generated by ent, DO NOT EDIT. + +package accountgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountID applies equality check predicate on the "account_id" field. It's identical to AccountIDEQ. +func AccountID(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ. +func Priority(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
+func CreatedAt(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// AccountIDEQ applies the EQ predicate on the "account_id" field. +func AccountIDEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v)) +} + +// AccountIDNEQ applies the NEQ predicate on the "account_id" field. +func AccountIDNEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldAccountID, v)) +} + +// AccountIDIn applies the In predicate on the "account_id" field. +func AccountIDIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldAccountID, vs...)) +} + +// AccountIDNotIn applies the NotIn predicate on the "account_id" field. +func AccountIDNotIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldAccountID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// PriorityEQ applies the EQ predicate on the "priority" field. +func PriorityEQ(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v)) +} + +// PriorityNEQ applies the NEQ predicate on the "priority" field. 
+func PriorityNEQ(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldPriority, v)) +} + +// PriorityIn applies the In predicate on the "priority" field. +func PriorityIn(vs ...int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldPriority, vs...)) +} + +// PriorityNotIn applies the NotIn predicate on the "priority" field. +func PriorityNotIn(vs ...int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldPriority, vs...)) +} + +// PriorityGT applies the GT predicate on the "priority" field. +func PriorityGT(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGT(FieldPriority, v)) +} + +// PriorityGTE applies the GTE predicate on the "priority" field. +func PriorityGTE(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGTE(FieldPriority, v)) +} + +// PriorityLT applies the LT predicate on the "priority" field. +func PriorityLT(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLT(FieldPriority, v)) +} + +// PriorityLTE applies the LTE predicate on the "priority" field. +func PriorityLTE(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLTE(FieldPriority, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasAccount applies the HasEdge predicate on the "account" edge. +func HasAccount() predicate.AccountGroup { + return predicate.AccountGroup(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, AccountColumn), + sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountWith applies the HasEdge predicate on the "account" edge with a given conditions (other predicates). +func HasAccountWith(preds ...predicate.Account) predicate.AccountGroup { + return predicate.AccountGroup(func(s *sql.Selector) { + step := newAccountStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. 
+func HasGroup() predicate.AccountGroup { + return predicate.AccountGroup(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.AccountGroup { + return predicate.AccountGroup(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AccountGroup) predicate.AccountGroup { + return predicate.AccountGroup(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AccountGroup) predicate.AccountGroup { + return predicate.AccountGroup(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AccountGroup) predicate.AccountGroup { + return predicate.AccountGroup(sql.NotPredicates(p)) +} diff --git a/backend/ent/accountgroup_create.go b/backend/ent/accountgroup_create.go new file mode 100644 index 00000000..6a1840a1 --- /dev/null +++ b/backend/ent/accountgroup_create.go @@ -0,0 +1,653 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" +) + +// AccountGroupCreate is the builder for creating a AccountGroup entity. 
+type AccountGroupCreate struct { + config + mutation *AccountGroupMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetAccountID sets the "account_id" field. +func (_c *AccountGroupCreate) SetAccountID(v int64) *AccountGroupCreate { + _c.mutation.SetAccountID(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *AccountGroupCreate) SetGroupID(v int64) *AccountGroupCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetPriority sets the "priority" field. +func (_c *AccountGroupCreate) SetPriority(v int) *AccountGroupCreate { + _c.mutation.SetPriority(v) + return _c +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_c *AccountGroupCreate) SetNillablePriority(v *int) *AccountGroupCreate { + if v != nil { + _c.SetPriority(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AccountGroupCreate) SetCreatedAt(v time.Time) *AccountGroupCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AccountGroupCreate) SetNillableCreatedAt(v *time.Time) *AccountGroupCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetAccount sets the "account" edge to the Account entity. +func (_c *AccountGroupCreate) SetAccount(v *Account) *AccountGroupCreate { + return _c.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *AccountGroupCreate) SetGroup(v *Group) *AccountGroupCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the AccountGroupMutation object of the builder. +func (_c *AccountGroupCreate) Mutation() *AccountGroupMutation { + return _c.mutation +} + +// Save creates the AccountGroup in the database. 
+func (_c *AccountGroupCreate) Save(ctx context.Context) (*AccountGroup, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AccountGroupCreate) SaveX(ctx context.Context) *AccountGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountGroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AccountGroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *AccountGroupCreate) defaults() { + if _, ok := _c.mutation.Priority(); !ok { + v := accountgroup.DefaultPriority + _c.mutation.SetPriority(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := accountgroup.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *AccountGroupCreate) check() error { + if _, ok := _c.mutation.AccountID(); !ok { + return &ValidationError{Name: "account_id", err: errors.New(`ent: missing required field "AccountGroup.account_id"`)} + } + if _, ok := _c.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "AccountGroup.group_id"`)} + } + if _, ok := _c.mutation.Priority(); !ok { + return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "AccountGroup.priority"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AccountGroup.created_at"`)} + } + if len(_c.mutation.AccountIDs()) == 0 { + return &ValidationError{Name: "account", err: errors.New(`ent: missing required edge "AccountGroup.account"`)} + } + if len(_c.mutation.GroupIDs()) == 0 { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "AccountGroup.group"`)} + } + return nil +} + +func (_c *AccountGroupCreate) sqlSave(ctx context.Context) (*AccountGroup, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} + +func (_c *AccountGroupCreate) createSpec() (*AccountGroup, *sqlgraph.CreateSpec) { + var ( + _node = &AccountGroup{config: _c.config} + _spec = sqlgraph.NewCreateSpec(accountgroup.Table, nil) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Priority(); ok { + _spec.SetField(accountgroup.FieldPriority, field.TypeInt, value) + _node.Priority = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(accountgroup.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.AccountIDs(); 
len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AccountID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.AccountGroup.Create(). +// SetAccountID(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AccountGroupUpsert) { +// SetAccountID(v+v). +// }). +// Exec(ctx) +func (_c *AccountGroupCreate) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertOne { + _c.conflict = opts + return &AccountGroupUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (_c *AccountGroupCreate) OnConflictColumns(columns ...string) *AccountGroupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AccountGroupUpsertOne{ + create: _c, + } +} + +type ( + // AccountGroupUpsertOne is the builder for "upsert"-ing + // one AccountGroup node. + AccountGroupUpsertOne struct { + create *AccountGroupCreate + } + + // AccountGroupUpsert is the "OnConflict" setter. + AccountGroupUpsert struct { + *sql.UpdateSet + } +) + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsert) SetAccountID(v int64) *AccountGroupUpsert { + u.Set(accountgroup.FieldAccountID, v) + return u +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdateAccountID() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldAccountID) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *AccountGroupUpsert) SetGroupID(v int64) *AccountGroupUpsert { + u.Set(accountgroup.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdateGroupID() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldGroupID) + return u +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsert) SetPriority(v int) *AccountGroupUpsert { + u.Set(accountgroup.FieldPriority, v) + return u +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdatePriority() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldPriority) + return u +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsert) AddPriority(v int) *AccountGroupUpsert { + u.Add(accountgroup.FieldPriority, v) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. 
+// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountGroupUpsertOne) UpdateNewValues() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(accountgroup.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountGroupUpsertOne) Ignore() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountGroupUpsertOne) DoNothing() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountGroupCreate.OnConflict +// documentation for more info. +func (u *AccountGroupUpsertOne) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsertOne) SetAccountID(v int64) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. 
+func (u *AccountGroupUpsertOne) UpdateAccountID() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateAccountID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *AccountGroupUpsertOne) SetGroupID(v int64) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsertOne) UpdateGroupID() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateGroupID() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsertOne) SetPriority(v int) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsertOne) AddPriority(v int) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsertOne) UpdatePriority() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdatePriority() + }) +} + +// Exec executes the query. +func (u *AccountGroupUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AccountGroupCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AccountGroupUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// AccountGroupCreateBulk is the builder for creating many AccountGroup entities in bulk. +type AccountGroupCreateBulk struct { + config + err error + builders []*AccountGroupCreate + conflict []sql.ConflictOption +} + +// Save creates the AccountGroup entities in the database. 
+func (_c *AccountGroupCreateBulk) Save(ctx context.Context) ([]*AccountGroup, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*AccountGroup, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AccountGroupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *AccountGroupCreateBulk) SaveX(ctx context.Context) []*AccountGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountGroupCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_c *AccountGroupCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.AccountGroup.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AccountGroupUpsert) { +// SetAccountID(v+v). +// }). +// Exec(ctx) +func (_c *AccountGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertBulk { + _c.conflict = opts + return &AccountGroupUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AccountGroupCreateBulk) OnConflictColumns(columns ...string) *AccountGroupUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AccountGroupUpsertBulk{ + create: _c, + } +} + +// AccountGroupUpsertBulk is the builder for "upsert"-ing +// a bulk of AccountGroup nodes. +type AccountGroupUpsertBulk struct { + create *AccountGroupCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *AccountGroupUpsertBulk) UpdateNewValues() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(accountgroup.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountGroupUpsertBulk) Ignore() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountGroupUpsertBulk) DoNothing() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountGroupCreateBulk.OnConflict +// documentation for more info. +func (u *AccountGroupUpsertBulk) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsertBulk) SetAccountID(v int64) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdateAccountID() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateAccountID() + }) +} + +// SetGroupID sets the "group_id" field. 
+func (u *AccountGroupUpsertBulk) SetGroupID(v int64) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdateGroupID() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateGroupID() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsertBulk) SetPriority(v int) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsertBulk) AddPriority(v int) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdatePriority() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdatePriority() + }) +} + +// Exec executes the query. +func (u *AccountGroupUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AccountGroupCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AccountGroupCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AccountGroupUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/accountgroup_delete.go b/backend/ent/accountgroup_delete.go new file mode 100644 index 00000000..41f65ad6 --- /dev/null +++ b/backend/ent/accountgroup_delete.go @@ -0,0 +1,87 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountGroupDelete is the builder for deleting a AccountGroup entity. +type AccountGroupDelete struct { + config + hooks []Hook + mutation *AccountGroupMutation +} + +// Where appends a list predicates to the AccountGroupDelete builder. +func (_d *AccountGroupDelete) Where(ps ...predicate.AccountGroup) *AccountGroupDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AccountGroupDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountGroupDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AccountGroupDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(accountgroup.Table, nil) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AccountGroupDeleteOne is the builder for deleting a single AccountGroup entity. +type AccountGroupDeleteOne struct { + _d *AccountGroupDelete +} + +// Where appends a list predicates to the AccountGroupDelete builder. +func (_d *AccountGroupDeleteOne) Where(ps ...predicate.AccountGroup) *AccountGroupDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. 
+func (_d *AccountGroupDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{accountgroup.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountGroupDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/accountgroup_query.go b/backend/ent/accountgroup_query.go new file mode 100644 index 00000000..98e1c3f6 --- /dev/null +++ b/backend/ent/accountgroup_query.go @@ -0,0 +1,603 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountGroupQuery is the builder for querying AccountGroup entities. +type AccountGroupQuery struct { + config + ctx *QueryContext + order []accountgroup.OrderOption + inters []Interceptor + predicates []predicate.AccountGroup + withAccount *AccountQuery + withGroup *GroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AccountGroupQuery builder. +func (_q *AccountGroupQuery) Where(ps ...predicate.AccountGroup) *AccountGroupQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AccountGroupQuery) Limit(limit int) *AccountGroupQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AccountGroupQuery) Offset(offset int) *AccountGroupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (_q *AccountGroupQuery) Unique(unique bool) *AccountGroupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AccountGroupQuery) Order(o ...accountgroup.OrderOption) *AccountGroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAccount chains the current query on the "account" edge. +func (_q *AccountGroupQuery) QueryAccount() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(accountgroup.Table, accountgroup.AccountColumn, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.AccountTable, accountgroup.AccountColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *AccountGroupQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(accountgroup.Table, accountgroup.GroupColumn, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.GroupTable, accountgroup.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AccountGroup entity from the query. 
+// Returns a *NotFoundError when no AccountGroup was found. +func (_q *AccountGroupQuery) First(ctx context.Context) (*AccountGroup, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{accountgroup.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AccountGroupQuery) FirstX(ctx context.Context) *AccountGroup { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// Only returns a single AccountGroup entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AccountGroup entity is found. +// Returns a *NotFoundError when no AccountGroup entities are found. +func (_q *AccountGroupQuery) Only(ctx context.Context) (*AccountGroup, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{accountgroup.Label} + default: + return nil, &NotSingularError{accountgroup.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AccountGroupQuery) OnlyX(ctx context.Context) *AccountGroup { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// All executes the query and returns a list of AccountGroups. +func (_q *AccountGroupQuery) All(ctx context.Context) ([]*AccountGroup, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AccountGroup, *AccountGroupQuery]() + return withInterceptors[[]*AccountGroup](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (_q *AccountGroupQuery) AllX(ctx context.Context) []*AccountGroup { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// Count returns the count of the given query. +func (_q *AccountGroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AccountGroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AccountGroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AccountGroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.First(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AccountGroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AccountGroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AccountGroupQuery) Clone() *AccountGroupQuery { + if _q == nil { + return nil + } + return &AccountGroupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]accountgroup.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.AccountGroup{}, _q.predicates...), + withAccount: _q.withAccount.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. 
+ sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAccount tells the query-builder to eager-load the nodes that are connected to +// the "account" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountGroupQuery) WithAccount(opts ...func(*AccountQuery)) *AccountGroupQuery { + query := (&AccountClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccount = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountGroupQuery) WithGroup(opts ...func(*GroupQuery)) *AccountGroupQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// AccountID int64 `json:"account_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AccountGroup.Query(). +// GroupBy(accountgroup.FieldAccountID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AccountGroupQuery) GroupBy(field string, fields ...string) *AccountGroupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AccountGroupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = accountgroup.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// AccountID int64 `json:"account_id,omitempty"` +// } +// +// client.AccountGroup.Query(). +// Select(accountgroup.FieldAccountID). 
+// Scan(ctx, &v) +func (_q *AccountGroupQuery) Select(fields ...string) *AccountGroupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AccountGroupSelect{AccountGroupQuery: _q} + sbuild.label = accountgroup.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AccountGroupSelect configured with the given aggregations. +func (_q *AccountGroupQuery) Aggregate(fns ...AggregateFunc) *AccountGroupSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *AccountGroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !accountgroup.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AccountGroup, error) { + var ( + nodes = []*AccountGroup{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withAccount != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AccountGroup).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AccountGroup{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAccount; query != nil { + if err := 
_q.loadAccount(ctx, query, nodes, nil, + func(n *AccountGroup, e *Account) { n.Edges.Account = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *AccountGroup, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AccountGroupQuery) loadAccount(ctx context.Context, query *AccountQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Account)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*AccountGroup) + for i := range nodes { + fk := nodes[i].AccountID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(account.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "account_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*AccountGroup) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, 
error) { + _spec := _q.querySpec() + _spec.Unique = false + _spec.Node.Columns = nil + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AccountGroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(accountgroup.Table, accountgroup.Columns, nil) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + for i := range fields { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + if _q.withAccount != nil { + _spec.Node.AddColumnOnce(accountgroup.FieldAccountID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(accountgroup.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(accountgroup.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = accountgroup.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AccountGroupGroupBy is the group-by builder for AccountGroup entities. +type AccountGroupGroupBy struct { + selector + build *AccountGroupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AccountGroupGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AccountGroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountGroupQuery, *AccountGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AccountGroupGroupBy) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AccountGroupSelect is the builder for selecting fields of AccountGroup entities. +type AccountGroupSelect struct { + *AccountGroupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
+func (_s *AccountGroupSelect) Aggregate(fns ...AggregateFunc) *AccountGroupSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AccountGroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountGroupQuery, *AccountGroupSelect](ctx, _s.AccountGroupQuery, _s, _s.inters, v) +} + +func (_s *AccountGroupSelect) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/accountgroup_update.go b/backend/ent/accountgroup_update.go new file mode 100644 index 00000000..fd7b5430 --- /dev/null +++ b/backend/ent/accountgroup_update.go @@ -0,0 +1,477 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountGroupUpdate is the builder for updating AccountGroup entities. 
+type AccountGroupUpdate struct { + config + hooks []Hook + mutation *AccountGroupMutation +} + +// Where appends a list predicates to the AccountGroupUpdate builder. +func (_u *AccountGroupUpdate) Where(ps ...predicate.AccountGroup) *AccountGroupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetAccountID sets the "account_id" field. +func (_u *AccountGroupUpdate) SetAccountID(v int64) *AccountGroupUpdate { + _u.mutation.SetAccountID(v) + return _u +} + +// SetNillableAccountID sets the "account_id" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillableAccountID(v *int64) *AccountGroupUpdate { + if v != nil { + _u.SetAccountID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *AccountGroupUpdate) SetGroupID(v int64) *AccountGroupUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillableGroupID(v *int64) *AccountGroupUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountGroupUpdate) SetPriority(v int) *AccountGroupUpdate { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillablePriority(v *int) *AccountGroupUpdate { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountGroupUpdate) AddPriority(v int) *AccountGroupUpdate { + _u.mutation.AddPriority(v) + return _u +} + +// SetAccount sets the "account" edge to the Account entity. +func (_u *AccountGroupUpdate) SetAccount(v *Account) *AccountGroupUpdate { + return _u.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. 
+func (_u *AccountGroupUpdate) SetGroup(v *Group) *AccountGroupUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the AccountGroupMutation object of the builder. +func (_u *AccountGroupUpdate) Mutation() *AccountGroupMutation { + return _u.mutation +} + +// ClearAccount clears the "account" edge to the Account entity. +func (_u *AccountGroupUpdate) ClearAccount() *AccountGroupUpdate { + _u.mutation.ClearAccount() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *AccountGroupUpdate) ClearGroup() *AccountGroupUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AccountGroupUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountGroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AccountGroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountGroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AccountGroupUpdate) check() error { + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`) + } + return nil +} + +func (_u *AccountGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(accountgroup.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(accountgroup.FieldPriority, field.TypeInt, value) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if 
_u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{accountgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AccountGroupUpdateOne is the builder for updating a single AccountGroup entity. +type AccountGroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AccountGroupMutation +} + +// SetAccountID sets the "account_id" field. +func (_u *AccountGroupUpdateOne) SetAccountID(v int64) *AccountGroupUpdateOne { + _u.mutation.SetAccountID(v) + return _u +} + +// SetNillableAccountID sets the "account_id" field if the given value is not nil. +func (_u *AccountGroupUpdateOne) SetNillableAccountID(v *int64) *AccountGroupUpdateOne { + if v != nil { + _u.SetAccountID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. 
+func (_u *AccountGroupUpdateOne) SetGroupID(v int64) *AccountGroupUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *AccountGroupUpdateOne) SetNillableGroupID(v *int64) *AccountGroupUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountGroupUpdateOne) SetPriority(v int) *AccountGroupUpdateOne { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountGroupUpdateOne) SetNillablePriority(v *int) *AccountGroupUpdateOne { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountGroupUpdateOne) AddPriority(v int) *AccountGroupUpdateOne { + _u.mutation.AddPriority(v) + return _u +} + +// SetAccount sets the "account" edge to the Account entity. +func (_u *AccountGroupUpdateOne) SetAccount(v *Account) *AccountGroupUpdateOne { + return _u.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *AccountGroupUpdateOne) SetGroup(v *Group) *AccountGroupUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the AccountGroupMutation object of the builder. +func (_u *AccountGroupUpdateOne) Mutation() *AccountGroupMutation { + return _u.mutation +} + +// ClearAccount clears the "account" edge to the Account entity. +func (_u *AccountGroupUpdateOne) ClearAccount() *AccountGroupUpdateOne { + _u.mutation.ClearAccount() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *AccountGroupUpdateOne) ClearGroup() *AccountGroupUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the AccountGroupUpdate builder. 
+func (_u *AccountGroupUpdateOne) Where(ps ...predicate.AccountGroup) *AccountGroupUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AccountGroupUpdateOne) Select(field string, fields ...string) *AccountGroupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated AccountGroup entity. +func (_u *AccountGroupUpdateOne) Save(ctx context.Context) (*AccountGroup, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountGroupUpdateOne) SaveX(ctx context.Context) *AccountGroup { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AccountGroupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountGroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AccountGroupUpdateOne) check() error { + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`) + } + return nil +} + +func (_u *AccountGroupUpdateOne) sqlSave(ctx context.Context) (_node *AccountGroup, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64)) + if id, ok := _u.mutation.AccountID(); !ok { + return nil, &ValidationError{Name: "account_id", err: errors.New(`ent: missing "AccountGroup.account_id" for update`)} + } else { + _spec.Node.CompositeID[0].Value = id + } + if id, ok := _u.mutation.GroupID(); !ok { + return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "AccountGroup.group_id" for update`)} + } else { + _spec.Node.CompositeID[1].Value = id + } + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, len(fields)) + for i, f := range fields { + if !accountgroup.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + _spec.Node.Columns[i] = f + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(accountgroup.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(accountgroup.FieldPriority, field.TypeInt, value) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: 
accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AccountGroup{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{accountgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, 
nil +} diff --git a/backend/ent/apikey.go b/backend/ent/apikey.go new file mode 100644 index 00000000..30cf9b4d --- /dev/null +++ b/backend/ent/apikey.go @@ -0,0 +1,237 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// ApiKey is the model entity for the ApiKey schema. +type ApiKey struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID *int64 `json:"group_id,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ApiKeyQuery when eager-loading is set. + Edges ApiKeyEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ApiKeyEdges holds the relations/edges for other nodes in the graph. +type ApiKeyEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. 
+ loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ApiKeyEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ApiKeyEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*ApiKey) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID: + values[i] = new(sql.NullInt64) + case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus: + values[i] = new(sql.NullString) + case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ApiKey fields. 
+func (_m *ApiKey) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case apikey.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case apikey.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case apikey.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case apikey.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case apikey.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case apikey.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case apikey.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case apikey.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = new(int64) + *_m.GroupID = value.Int64 + } + case apikey.FieldStatus: + if value, ok := 
values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the ApiKey. +// This includes values selected through modifiers, order, etc. +func (_m *ApiKey) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the ApiKey entity. +func (_m *ApiKey) QueryUser() *UserQuery { + return NewApiKeyClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the ApiKey entity. +func (_m *ApiKey) QueryGroup() *GroupQuery { + return NewApiKeyClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this ApiKey. +// Note that you need to call ApiKey.Unwrap() before calling this method if this ApiKey +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *ApiKey) Update() *ApiKeyUpdateOne { + return NewApiKeyClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the ApiKey entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *ApiKey) Unwrap() *ApiKey { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: ApiKey is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *ApiKey) String() string { + var builder strings.Builder + builder.WriteString("ApiKey(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + if v := _m.GroupID; v != nil { + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteByte(')') + return builder.String() +} + +// ApiKeys is a parsable slice of ApiKey. +type ApiKeys []*ApiKey diff --git a/backend/ent/apikey/apikey.go b/backend/ent/apikey/apikey.go new file mode 100644 index 00000000..4eba5f53 --- /dev/null +++ b/backend/ent/apikey/apikey.go @@ -0,0 +1,177 @@ +// Code generated by ent, DO NOT EDIT. + +package apikey + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the apikey type in the database. + Label = "api_key" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. 
+ FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // Table holds the table name of the apikey in the database. + Table = "api_keys" + // UserTable is the table that holds the user relation/edge. + UserTable = "api_keys" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "api_keys" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for apikey fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldUserID, + FieldKey, + FieldName, + FieldGroupID, + FieldStatus, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error +) + +// OrderOption defines the ordering options for the ApiKey queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. 
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/apikey/where.go b/backend/ent/apikey/where.go new file mode 100644 index 00000000..11cabd3f --- /dev/null +++ b/backend/ent/apikey/where.go @@ -0,0 +1,532 @@ +// Code generated by ent, DO NOT EDIT. + +package apikey + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldDeletedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldUserID, v)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldKey, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldName, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldGroupID, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. 
+func Status(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. 
+func DeletedAtNotIn(vs ...time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.ApiKey { + return predicate.ApiKey(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotNull(FieldDeletedAt)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldUserID, vs...)) +} + +// KeyEQ applies the EQ predicate on the "key" field. 
+func KeyEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. 
+func KeyEqualFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContainsFold(FieldKey, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. 
+func NameHasPrefix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContainsFold(FieldName, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDIsNil applies the IsNil predicate on the "group_id" field. +func GroupIDIsNil() predicate.ApiKey { + return predicate.ApiKey(sql.FieldIsNull(FieldGroupID)) +} + +// GroupIDNotNil applies the NotNil predicate on the "group_id" field. +func GroupIDNotNil() predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotNull(FieldGroupID)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. 
+func StatusNEQ(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. 
+func StatusContainsFold(v string) predicate.ApiKey { + return predicate.ApiKey(sql.FieldContainsFold(FieldStatus, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.ApiKey { + return predicate.ApiKey(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.ApiKey { + return predicate.ApiKey(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.ApiKey { + return predicate.ApiKey(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.ApiKey { + return predicate.ApiKey(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ApiKey) predicate.ApiKey { + return predicate.ApiKey(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ApiKey) predicate.ApiKey { + return predicate.ApiKey(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.ApiKey) predicate.ApiKey { + return predicate.ApiKey(sql.NotPredicates(p)) +} diff --git a/backend/ent/apikey_create.go b/backend/ent/apikey_create.go new file mode 100644 index 00000000..8d7ddb69 --- /dev/null +++ b/backend/ent/apikey_create.go @@ -0,0 +1,955 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// ApiKeyCreate is the builder for creating a ApiKey entity. +type ApiKeyCreate struct { + config + mutation *ApiKeyMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *ApiKeyCreate) SetCreatedAt(v time.Time) *ApiKeyCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *ApiKeyCreate) SetNillableCreatedAt(v *time.Time) *ApiKeyCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *ApiKeyCreate) SetUpdatedAt(v time.Time) *ApiKeyCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *ApiKeyCreate) SetNillableUpdatedAt(v *time.Time) *ApiKeyCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *ApiKeyCreate) SetDeletedAt(v time.Time) *ApiKeyCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *ApiKeyCreate) SetNillableDeletedAt(v *time.Time) *ApiKeyCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. 
+func (_c *ApiKeyCreate) SetUserID(v int64) *ApiKeyCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetKey sets the "key" field. +func (_c *ApiKeyCreate) SetKey(v string) *ApiKeyCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetName sets the "name" field. +func (_c *ApiKeyCreate) SetName(v string) *ApiKeyCreate { + _c.mutation.SetName(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *ApiKeyCreate) SetGroupID(v int64) *ApiKeyCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_c *ApiKeyCreate) SetNillableGroupID(v *int64) *ApiKeyCreate { + if v != nil { + _c.SetGroupID(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *ApiKeyCreate) SetStatus(v string) *ApiKeyCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *ApiKeyCreate) SetNillableStatus(v *string) *ApiKeyCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *ApiKeyCreate) SetUser(v *User) *ApiKeyCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *ApiKeyCreate) SetGroup(v *Group) *ApiKeyCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the ApiKeyMutation object of the builder. +func (_c *ApiKeyCreate) Mutation() *ApiKeyMutation { + return _c.mutation +} + +// Save creates the ApiKey in the database. +func (_c *ApiKeyCreate) Save(ctx context.Context) (*ApiKey, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *ApiKeyCreate) SaveX(ctx context.Context) *ApiKey { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *ApiKeyCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ApiKeyCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *ApiKeyCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if apikey.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := apikey.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if apikey.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := apikey.DefaultStatus + _c.mutation.SetStatus(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *ApiKeyCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ApiKey.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ApiKey.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "ApiKey.user_id"`)} + } + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "ApiKey.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := apikey.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "ApiKey.key": %w`, err)} + } + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ApiKey.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := apikey.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ApiKey.name": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "ApiKey.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := apikey.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ApiKey.status": %w`, err)} + } + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "ApiKey.user"`)} + } + return nil +} + +func (_c *ApiKeyCreate) sqlSave(ctx context.Context) (*ApiKey, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { 
+ if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *ApiKeyCreate) createSpec() (*ApiKey, *sqlgraph.CreateSpec) { + var ( + _node = &ApiKey{config: _c.config} + _spec = sqlgraph.NewCreateSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(apikey.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Key(); ok { + _spec.SetField(apikey.FieldKey, field.TypeString, value) + _node.Key = value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(apikey.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(apikey.FieldStatus, field.TypeString, value) + _node.Status = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + 
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.ApiKey.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ApiKeyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ApiKeyCreate) OnConflict(opts ...sql.ConflictOption) *ApiKeyUpsertOne { + _c.conflict = opts + return &ApiKeyUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ApiKey.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ApiKeyCreate) OnConflictColumns(columns ...string) *ApiKeyUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ApiKeyUpsertOne{ + create: _c, + } +} + +type ( + // ApiKeyUpsertOne is the builder for "upsert"-ing + // one ApiKey node. + ApiKeyUpsertOne struct { + create *ApiKeyCreate + } + + // ApiKeyUpsert is the "OnConflict" setter. + ApiKeyUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *ApiKeyUpsert) SetUpdatedAt(v time.Time) *ApiKeyUpsert { + u.Set(apikey.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateUpdatedAt() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (u *ApiKeyUpsert) SetDeletedAt(v time.Time) *ApiKeyUpsert { + u.Set(apikey.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateDeletedAt() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ApiKeyUpsert) ClearDeletedAt() *ApiKeyUpsert { + u.SetNull(apikey.FieldDeletedAt) + return u +} + +// SetUserID sets the "user_id" field. +func (u *ApiKeyUpsert) SetUserID(v int64) *ApiKeyUpsert { + u.Set(apikey.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateUserID() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldUserID) + return u +} + +// SetKey sets the "key" field. +func (u *ApiKeyUpsert) SetKey(v string) *ApiKeyUpsert { + u.Set(apikey.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateKey() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldKey) + return u +} + +// SetName sets the "name" field. +func (u *ApiKeyUpsert) SetName(v string) *ApiKeyUpsert { + u.Set(apikey.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateName() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldName) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *ApiKeyUpsert) SetGroupID(v int64) *ApiKeyUpsert { + u.Set(apikey.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateGroupID() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldGroupID) + return u +} + +// ClearGroupID clears the value of the "group_id" field. 
+func (u *ApiKeyUpsert) ClearGroupID() *ApiKeyUpsert { + u.SetNull(apikey.FieldGroupID) + return u +} + +// SetStatus sets the "status" field. +func (u *ApiKeyUpsert) SetStatus(v string) *ApiKeyUpsert { + u.Set(apikey.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ApiKeyUpsert) UpdateStatus() *ApiKeyUpsert { + u.SetExcluded(apikey.FieldStatus) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.ApiKey.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ApiKeyUpsertOne) UpdateNewValues() *ApiKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(apikey.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ApiKey.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ApiKeyUpsertOne) Ignore() *ApiKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ApiKeyUpsertOne) DoNothing() *ApiKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ApiKeyCreate.OnConflict +// documentation for more info. 
+func (u *ApiKeyUpsertOne) Update(set func(*ApiKeyUpsert)) *ApiKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ApiKeyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ApiKeyUpsertOne) SetUpdatedAt(v time.Time) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateUpdatedAt() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ApiKeyUpsertOne) SetDeletedAt(v time.Time) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateDeletedAt() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ApiKeyUpsertOne) ClearDeletedAt() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *ApiKeyUpsertOne) SetUserID(v int64) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateUserID() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateUserID() + }) +} + +// SetKey sets the "key" field. +func (u *ApiKeyUpsertOne) SetKey(v string) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. 
+func (u *ApiKeyUpsertOne) UpdateKey() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *ApiKeyUpsertOne) SetName(v string) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateName() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateName() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *ApiKeyUpsertOne) SetGroupID(v int64) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateGroupID() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *ApiKeyUpsertOne) ClearGroupID() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.ClearGroupID() + }) +} + +// SetStatus sets the "status" field. +func (u *ApiKeyUpsertOne) SetStatus(v string) *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ApiKeyUpsertOne) UpdateStatus() *ApiKeyUpsertOne { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ApiKeyUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ApiKeyCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ApiKeyUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
+func (u *ApiKeyUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ApiKeyUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ApiKeyCreateBulk is the builder for creating many ApiKey entities in bulk. +type ApiKeyCreateBulk struct { + config + err error + builders []*ApiKeyCreate + conflict []sql.ConflictOption +} + +// Save creates the ApiKey entities in the database. +func (_c *ApiKeyCreateBulk) Save(ctx context.Context) ([]*ApiKey, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*ApiKey, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ApiKeyMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *ApiKeyCreateBulk) SaveX(ctx context.Context) []*ApiKey { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ApiKeyCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ApiKeyCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.ApiKey.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ApiKeyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ApiKeyCreateBulk) OnConflict(opts ...sql.ConflictOption) *ApiKeyUpsertBulk { + _c.conflict = opts + return &ApiKeyUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ApiKey.Create(). 
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ApiKeyCreateBulk) OnConflictColumns(columns ...string) *ApiKeyUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ApiKeyUpsertBulk{ + create: _c, + } +} + +// ApiKeyUpsertBulk is the builder for "upsert"-ing +// a bulk of ApiKey nodes. +type ApiKeyUpsertBulk struct { + create *ApiKeyCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.ApiKey.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ApiKeyUpsertBulk) UpdateNewValues() *ApiKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(apikey.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ApiKey.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ApiKeyUpsertBulk) Ignore() *ApiKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ApiKeyUpsertBulk) DoNothing() *ApiKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ApiKeyCreateBulk.OnConflict +// documentation for more info. 
+func (u *ApiKeyUpsertBulk) Update(set func(*ApiKeyUpsert)) *ApiKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ApiKeyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ApiKeyUpsertBulk) SetUpdatedAt(v time.Time) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateUpdatedAt() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ApiKeyUpsertBulk) SetDeletedAt(v time.Time) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateDeletedAt() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ApiKeyUpsertBulk) ClearDeletedAt() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *ApiKeyUpsertBulk) SetUserID(v int64) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateUserID() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateUserID() + }) +} + +// SetKey sets the "key" field. +func (u *ApiKeyUpsertBulk) SetKey(v string) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. 
+func (u *ApiKeyUpsertBulk) UpdateKey() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *ApiKeyUpsertBulk) SetName(v string) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateName() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateName() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *ApiKeyUpsertBulk) SetGroupID(v int64) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateGroupID() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *ApiKeyUpsertBulk) ClearGroupID() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.ClearGroupID() + }) +} + +// SetStatus sets the "status" field. +func (u *ApiKeyUpsertBulk) SetStatus(v string) *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ApiKeyUpsertBulk) UpdateStatus() *ApiKeyUpsertBulk { + return u.Update(func(s *ApiKeyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ApiKeyUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. 
Set it on the ApiKeyCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ApiKeyCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ApiKeyUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/apikey_delete.go b/backend/ent/apikey_delete.go new file mode 100644 index 00000000..6e5c200c --- /dev/null +++ b/backend/ent/apikey_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ApiKeyDelete is the builder for deleting a ApiKey entity. +type ApiKeyDelete struct { + config + hooks []Hook + mutation *ApiKeyMutation +} + +// Where appends a list predicates to the ApiKeyDelete builder. +func (_d *ApiKeyDelete) Where(ps ...predicate.ApiKey) *ApiKeyDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ApiKeyDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *ApiKeyDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ApiKeyDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ApiKeyDeleteOne is the builder for deleting a single ApiKey entity. +type ApiKeyDeleteOne struct { + _d *ApiKeyDelete +} + +// Where appends a list predicates to the ApiKeyDelete builder. +func (_d *ApiKeyDeleteOne) Where(ps ...predicate.ApiKey) *ApiKeyDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *ApiKeyDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{apikey.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ApiKeyDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/apikey_query.go b/backend/ent/apikey_query.go new file mode 100644 index 00000000..86051a60 --- /dev/null +++ b/backend/ent/apikey_query.go @@ -0,0 +1,684 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// ApiKeyQuery is the builder for querying ApiKey entities. +type ApiKeyQuery struct { + config + ctx *QueryContext + order []apikey.OrderOption + inters []Interceptor + predicates []predicate.ApiKey + withUser *UserQuery + withGroup *GroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ApiKeyQuery builder. +func (_q *ApiKeyQuery) Where(ps ...predicate.ApiKey) *ApiKeyQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *ApiKeyQuery) Limit(limit int) *ApiKeyQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ApiKeyQuery) Offset(offset int) *ApiKeyQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *ApiKeyQuery) Unique(unique bool) *ApiKeyQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *ApiKeyQuery) Order(o ...apikey.OrderOption) *ApiKeyQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. 
+func (_q *ApiKeyQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *ApiKeyQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ApiKey entity from the query. +// Returns a *NotFoundError when no ApiKey was found. +func (_q *ApiKeyQuery) First(ctx context.Context) (*ApiKey, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{apikey.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *ApiKeyQuery) FirstX(ctx context.Context) *ApiKey { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ApiKey ID from the query. +// Returns a *NotFoundError when no ApiKey ID was found. +func (_q *ApiKeyQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{apikey.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *ApiKeyQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ApiKey entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ApiKey entity is found. +// Returns a *NotFoundError when no ApiKey entities are found. +func (_q *ApiKeyQuery) Only(ctx context.Context) (*ApiKey, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{apikey.Label} + default: + return nil, &NotSingularError{apikey.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ApiKeyQuery) OnlyX(ctx context.Context) *ApiKey { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ApiKey ID in the query. +// Returns a *NotSingularError when more than one ApiKey ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *ApiKeyQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{apikey.Label} + default: + err = &NotSingularError{apikey.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *ApiKeyQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ApiKeys. +func (_q *ApiKeyQuery) All(ctx context.Context) ([]*ApiKey, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ApiKey, *ApiKeyQuery]() + return withInterceptors[[]*ApiKey](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ApiKeyQuery) AllX(ctx context.Context) []*ApiKey { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ApiKey IDs. +func (_q *ApiKeyQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(apikey.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ApiKeyQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *ApiKeyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ApiKeyQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ApiKeyQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *ApiKeyQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ApiKeyQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ApiKeyQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ApiKeyQuery) Clone() *ApiKeyQuery { + if _q == nil { + return nil + } + return &ApiKeyQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]apikey.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ApiKey{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *ApiKeyQuery) WithUser(opts ...func(*UserQuery)) *ApiKeyQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *ApiKeyQuery) WithGroup(opts ...func(*GroupQuery)) *ApiKeyQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ApiKey.Query(). +// GroupBy(apikey.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ApiKeyQuery) GroupBy(field string, fields ...string) *ApiKeyGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ApiKeyGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = apikey.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.ApiKey.Query(). +// Select(apikey.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *ApiKeyQuery) Select(fields ...string) *ApiKeySelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ApiKeySelect{ApiKeyQuery: _q} + sbuild.label = apikey.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ApiKeySelect configured with the given aggregations. 
+func (_q *ApiKeyQuery) Aggregate(fns ...AggregateFunc) *ApiKeySelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *ApiKeyQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !apikey.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ApiKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ApiKey, error) { + var ( + nodes = []*ApiKey{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ApiKey).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ApiKey{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *ApiKey, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *ApiKey, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *ApiKeyQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*ApiKey, init func(*ApiKey), assign func(*ApiKey, *User)) error 
{ + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*ApiKey) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *ApiKeyQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*ApiKey, init func(*ApiKey), assign func(*ApiKey, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*ApiKey) + for i := range nodes { + if nodes[i].GroupID == nil { + continue + } + fk := *nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *ApiKeyQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ApiKeyQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = 
true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID) + for i := range fields { + if fields[i] != apikey.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(apikey.FieldUserID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(apikey.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ApiKeyQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(apikey.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = apikey.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ApiKeyGroupBy is the group-by builder for ApiKey entities. +type ApiKeyGroupBy struct { + selector + build *ApiKeyQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. 
+func (_g *ApiKeyGroupBy) Aggregate(fns ...AggregateFunc) *ApiKeyGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *ApiKeyGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ApiKeyQuery, *ApiKeyGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ApiKeyGroupBy) sqlScan(ctx context.Context, root *ApiKeyQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ApiKeySelect is the builder for selecting fields of ApiKey entities. +type ApiKeySelect struct { + *ApiKeyQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ApiKeySelect) Aggregate(fns ...AggregateFunc) *ApiKeySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *ApiKeySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ApiKeyQuery, *ApiKeySelect](ctx, _s.ApiKeyQuery, _s, _s.inters, v) +} + +func (_s *ApiKeySelect) sqlScan(ctx context.Context, root *ApiKeyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/apikey_update.go b/backend/ent/apikey_update.go new file mode 100644 index 00000000..3917d068 --- /dev/null +++ b/backend/ent/apikey_update.go @@ -0,0 +1,660 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// ApiKeyUpdate is the builder for updating ApiKey entities. +type ApiKeyUpdate struct { + config + hooks []Hook + mutation *ApiKeyMutation +} + +// Where appends a list predicates to the ApiKeyUpdate builder. +func (_u *ApiKeyUpdate) Where(ps ...predicate.ApiKey) *ApiKeyUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *ApiKeyUpdate) SetUpdatedAt(v time.Time) *ApiKeyUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ApiKeyUpdate) SetDeletedAt(v time.Time) *ApiKeyUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ApiKeyUpdate) SetNillableDeletedAt(v *time.Time) *ApiKeyUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ApiKeyUpdate) ClearDeletedAt() *ApiKeyUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *ApiKeyUpdate) SetUserID(v int64) *ApiKeyUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *ApiKeyUpdate) SetNillableUserID(v *int64) *ApiKeyUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetKey sets the "key" field. +func (_u *ApiKeyUpdate) SetKey(v string) *ApiKeyUpdate { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *ApiKeyUpdate) SetNillableKey(v *string) *ApiKeyUpdate { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *ApiKeyUpdate) SetName(v string) *ApiKeyUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ApiKeyUpdate) SetNillableName(v *string) *ApiKeyUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *ApiKeyUpdate) SetGroupID(v int64) *ApiKeyUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. 
+func (_u *ApiKeyUpdate) SetNillableGroupID(v *int64) *ApiKeyUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *ApiKeyUpdate) ClearGroupID() *ApiKeyUpdate { + _u.mutation.ClearGroupID() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ApiKeyUpdate) SetStatus(v string) *ApiKeyUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ApiKeyUpdate) SetNillableStatus(v *string) *ApiKeyUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *ApiKeyUpdate) SetUser(v *User) *ApiKeyUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *ApiKeyUpdate) SetGroup(v *Group) *ApiKeyUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the ApiKeyMutation object of the builder. +func (_u *ApiKeyUpdate) Mutation() *ApiKeyMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *ApiKeyUpdate) ClearUser() *ApiKeyUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *ApiKeyUpdate) ClearGroup() *ApiKeyUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *ApiKeyUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ApiKeyUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (_u *ApiKeyUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ApiKeyUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ApiKeyUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if apikey.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ApiKeyUpdate) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := apikey.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "ApiKey.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := apikey.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ApiKey.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := apikey.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ApiKey.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ApiKey.user"`) + } + return nil +} + +func (_u *ApiKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if 
value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(apikey.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(apikey.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(apikey.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(apikey.FieldStatus, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + 
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{apikey.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ApiKeyUpdateOne is the builder for updating a single ApiKey entity. +type ApiKeyUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ApiKeyMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ApiKeyUpdateOne) SetUpdatedAt(v time.Time) *ApiKeyUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ApiKeyUpdateOne) SetDeletedAt(v time.Time) *ApiKeyUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableDeletedAt(v *time.Time) *ApiKeyUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ApiKeyUpdateOne) ClearDeletedAt() *ApiKeyUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *ApiKeyUpdateOne) SetUserID(v int64) *ApiKeyUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableUserID(v *int64) *ApiKeyUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetKey sets the "key" field. 
+func (_u *ApiKeyUpdateOne) SetKey(v string) *ApiKeyUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableKey(v *string) *ApiKeyUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *ApiKeyUpdateOne) SetName(v string) *ApiKeyUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableName(v *string) *ApiKeyUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *ApiKeyUpdateOne) SetGroupID(v int64) *ApiKeyUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableGroupID(v *int64) *ApiKeyUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *ApiKeyUpdateOne) ClearGroupID() *ApiKeyUpdateOne { + _u.mutation.ClearGroupID() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ApiKeyUpdateOne) SetStatus(v string) *ApiKeyUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ApiKeyUpdateOne) SetNillableStatus(v *string) *ApiKeyUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *ApiKeyUpdateOne) SetUser(v *User) *ApiKeyUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *ApiKeyUpdateOne) SetGroup(v *Group) *ApiKeyUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the ApiKeyMutation object of the builder. 
+func (_u *ApiKeyUpdateOne) Mutation() *ApiKeyMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *ApiKeyUpdateOne) ClearUser() *ApiKeyUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *ApiKeyUpdateOne) ClearGroup() *ApiKeyUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the ApiKeyUpdate builder. +func (_u *ApiKeyUpdateOne) Where(ps ...predicate.ApiKey) *ApiKeyUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ApiKeyUpdateOne) Select(field string, fields ...string) *ApiKeyUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated ApiKey entity. +func (_u *ApiKeyUpdateOne) Save(ctx context.Context) (*ApiKey, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ApiKeyUpdateOne) SaveX(ctx context.Context) *ApiKey { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ApiKeyUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ApiKeyUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_u *ApiKeyUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if apikey.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ApiKeyUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := apikey.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "ApiKey.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := apikey.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ApiKey.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := apikey.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ApiKey.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ApiKey.user"`) + } + return nil +} + +func (_u *ApiKeyUpdateOne) sqlSave(ctx context.Context) (_node *ApiKey, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ApiKey.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID) + for _, f := range fields { + if !apikey.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for 
query", f)} + } + if f != apikey.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(apikey.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(apikey.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(apikey.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(apikey.FieldStatus, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + 
_spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ApiKey{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{apikey.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go new file mode 100644 index 00000000..00d992cc --- /dev/null +++ b/backend/ent/client.go @@ -0,0 +1,1950 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/Wei-Shaw/sub2api/ent/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// Client is the client that holds all ent builders. 
+type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Account is the client for interacting with the Account builders. + Account *AccountClient + // AccountGroup is the client for interacting with the AccountGroup builders. + AccountGroup *AccountGroupClient + // ApiKey is the client for interacting with the ApiKey builders. + ApiKey *ApiKeyClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // Proxy is the client for interacting with the Proxy builders. + Proxy *ProxyClient + // RedeemCode is the client for interacting with the RedeemCode builders. + RedeemCode *RedeemCodeClient + // Setting is the client for interacting with the Setting builders. + Setting *SettingClient + // User is the client for interacting with the User builders. + User *UserClient + // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. + UserAllowedGroup *UserAllowedGroupClient + // UserSubscription is the client for interacting with the UserSubscription builders. + UserSubscription *UserSubscriptionClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Account = NewAccountClient(c.config) + c.AccountGroup = NewAccountGroupClient(c.config) + c.ApiKey = NewApiKeyClient(c.config) + c.Group = NewGroupClient(c.config) + c.Proxy = NewProxyClient(c.config) + c.RedeemCode = NewRedeemCodeClient(c.config) + c.Setting = NewSettingClient(c.config) + c.User = NewUserClient(c.config) + c.UserAllowedGroup = NewUserAllowedGroupClient(c.config) + c.UserSubscription = NewUserSubscriptionClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. 
+ config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. 
+var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + ApiKey: NewApiKeyClient(cfg), + Group: NewGroupClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + ApiKey: NewApiKeyClient(cfg), + Group: NewGroupClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), + }, nil +} + +// Debug returns a new debug-client. 
It's used to get verbose logging on specific operations. +// +// client.Debug(). +// Account. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.Account, c.AccountGroup, c.ApiKey, c.Group, c.Proxy, c.RedeemCode, c.Setting, + c.User, c.UserAllowedGroup, c.UserSubscription, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Account, c.AccountGroup, c.ApiKey, c.Group, c.Proxy, c.RedeemCode, c.Setting, + c.User, c.UserAllowedGroup, c.UserSubscription, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. 
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AccountMutation: + return c.Account.mutate(ctx, m) + case *AccountGroupMutation: + return c.AccountGroup.mutate(ctx, m) + case *ApiKeyMutation: + return c.ApiKey.mutate(ctx, m) + case *GroupMutation: + return c.Group.mutate(ctx, m) + case *ProxyMutation: + return c.Proxy.mutate(ctx, m) + case *RedeemCodeMutation: + return c.RedeemCode.mutate(ctx, m) + case *SettingMutation: + return c.Setting.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + case *UserAllowedGroupMutation: + return c.UserAllowedGroup.mutate(ctx, m) + case *UserSubscriptionMutation: + return c.UserSubscription.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// AccountClient is a client for the Account schema. +type AccountClient struct { + config +} + +// NewAccountClient returns a client for the Account from the given config. +func NewAccountClient(c config) *AccountClient { + return &AccountClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `account.Hooks(f(g(h())))`. +func (c *AccountClient) Use(hooks ...Hook) { + c.hooks.Account = append(c.hooks.Account, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `account.Intercept(f(g(h())))`. +func (c *AccountClient) Intercept(interceptors ...Interceptor) { + c.inters.Account = append(c.inters.Account, interceptors...) +} + +// Create returns a builder for creating a Account entity. +func (c *AccountClient) Create() *AccountCreate { + mutation := newAccountMutation(c.config, OpCreate) + return &AccountCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Account entities. 
+func (c *AccountClient) CreateBulk(builders ...*AccountCreate) *AccountCreateBulk { + return &AccountCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AccountClient) MapCreateBulk(slice any, setFunc func(*AccountCreate, int)) *AccountCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AccountCreateBulk{err: fmt.Errorf("calling to AccountClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AccountCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AccountCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Account. +func (c *AccountClient) Update() *AccountUpdate { + mutation := newAccountMutation(c.config, OpUpdate) + return &AccountUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AccountClient) UpdateOne(_m *Account) *AccountUpdateOne { + mutation := newAccountMutation(c.config, OpUpdateOne, withAccount(_m)) + return &AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AccountClient) UpdateOneID(id int64) *AccountUpdateOne { + mutation := newAccountMutation(c.config, OpUpdateOne, withAccountID(id)) + return &AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Account. +func (c *AccountClient) Delete() *AccountDelete { + mutation := newAccountMutation(c.config, OpDelete) + return &AccountDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *AccountClient) DeleteOne(_m *Account) *AccountDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AccountClient) DeleteOneID(id int64) *AccountDeleteOne { + builder := c.Delete().Where(account.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AccountDeleteOne{builder} +} + +// Query returns a query builder for Account. +func (c *AccountClient) Query() *AccountQuery { + return &AccountQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAccount}, + inters: c.Interceptors(), + } +} + +// Get returns a Account entity by its id. +func (c *AccountClient) Get(ctx context.Context, id int64) (*Account, error) { + return c.Query().Where(account.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AccountClient) GetX(ctx context.Context, id int64) *Account { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroups queries the groups edge of a Account. +func (c *AccountClient) QueryGroups(_m *Account) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccountGroups queries the account_groups edge of a Account. 
+func (c *AccountClient) QueryAccountGroups(_m *Account) *AccountGroupQuery { + query := (&AccountGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn), + sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AccountClient) Hooks() []Hook { + hooks := c.hooks.Account + return append(hooks[:len(hooks):len(hooks)], account.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *AccountClient) Interceptors() []Interceptor { + inters := c.inters.Account + return append(inters[:len(inters):len(inters)], account.Interceptors[:]...) +} + +func (c *AccountClient) mutate(ctx context.Context, m *AccountMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AccountCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AccountUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AccountDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Account mutation op: %q", m.Op()) + } +} + +// AccountGroupClient is a client for the AccountGroup schema. +type AccountGroupClient struct { + config +} + +// NewAccountGroupClient returns a client for the AccountGroup from the given config. +func NewAccountGroupClient(c config) *AccountGroupClient { + return &AccountGroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. 
+// A call to `Use(f, g, h)` equals to `accountgroup.Hooks(f(g(h())))`. +func (c *AccountGroupClient) Use(hooks ...Hook) { + c.hooks.AccountGroup = append(c.hooks.AccountGroup, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `accountgroup.Intercept(f(g(h())))`. +func (c *AccountGroupClient) Intercept(interceptors ...Interceptor) { + c.inters.AccountGroup = append(c.inters.AccountGroup, interceptors...) +} + +// Create returns a builder for creating a AccountGroup entity. +func (c *AccountGroupClient) Create() *AccountGroupCreate { + mutation := newAccountGroupMutation(c.config, OpCreate) + return &AccountGroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AccountGroup entities. +func (c *AccountGroupClient) CreateBulk(builders ...*AccountGroupCreate) *AccountGroupCreateBulk { + return &AccountGroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AccountGroupClient) MapCreateBulk(slice any, setFunc func(*AccountGroupCreate, int)) *AccountGroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AccountGroupCreateBulk{err: fmt.Errorf("calling to AccountGroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AccountGroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AccountGroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AccountGroup. 
+func (c *AccountGroupClient) Update() *AccountGroupUpdate { + mutation := newAccountGroupMutation(c.config, OpUpdate) + return &AccountGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AccountGroupClient) UpdateOne(_m *AccountGroup) *AccountGroupUpdateOne { + mutation := newAccountGroupMutation(c.config, OpUpdateOne) + mutation.account = &_m.AccountID + mutation.group = &_m.GroupID + return &AccountGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AccountGroup. +func (c *AccountGroupClient) Delete() *AccountGroupDelete { + mutation := newAccountGroupMutation(c.config, OpDelete) + return &AccountGroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Query returns a query builder for AccountGroup. +func (c *AccountGroupClient) Query() *AccountGroupQuery { + return &AccountGroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAccountGroup}, + inters: c.Interceptors(), + } +} + +// QueryAccount queries the account edge of a AccountGroup. +func (c *AccountGroupClient) QueryAccount(_m *AccountGroup) *AccountQuery { + return c.Query(). + Where(accountgroup.AccountID(_m.AccountID), accountgroup.GroupID(_m.GroupID)). + QueryAccount() +} + +// QueryGroup queries the group edge of a AccountGroup. +func (c *AccountGroupClient) QueryGroup(_m *AccountGroup) *GroupQuery { + return c.Query(). + Where(accountgroup.AccountID(_m.AccountID), accountgroup.GroupID(_m.GroupID)). + QueryGroup() +} + +// Hooks returns the client hooks. +func (c *AccountGroupClient) Hooks() []Hook { + return c.hooks.AccountGroup +} + +// Interceptors returns the client interceptors. 
+func (c *AccountGroupClient) Interceptors() []Interceptor { + return c.inters.AccountGroup +} + +func (c *AccountGroupClient) mutate(ctx context.Context, m *AccountGroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AccountGroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AccountGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AccountGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AccountGroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AccountGroup mutation op: %q", m.Op()) + } +} + +// ApiKeyClient is a client for the ApiKey schema. +type ApiKeyClient struct { + config +} + +// NewApiKeyClient returns a client for the ApiKey from the given config. +func NewApiKeyClient(c config) *ApiKeyClient { + return &ApiKeyClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `apikey.Hooks(f(g(h())))`. +func (c *ApiKeyClient) Use(hooks ...Hook) { + c.hooks.ApiKey = append(c.hooks.ApiKey, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `apikey.Intercept(f(g(h())))`. +func (c *ApiKeyClient) Intercept(interceptors ...Interceptor) { + c.inters.ApiKey = append(c.inters.ApiKey, interceptors...) +} + +// Create returns a builder for creating a ApiKey entity. +func (c *ApiKeyClient) Create() *ApiKeyCreate { + mutation := newApiKeyMutation(c.config, OpCreate) + return &ApiKeyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ApiKey entities. 
+func (c *ApiKeyClient) CreateBulk(builders ...*ApiKeyCreate) *ApiKeyCreateBulk { + return &ApiKeyCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ApiKeyClient) MapCreateBulk(slice any, setFunc func(*ApiKeyCreate, int)) *ApiKeyCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ApiKeyCreateBulk{err: fmt.Errorf("calling to ApiKeyClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ApiKeyCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ApiKeyCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ApiKey. +func (c *ApiKeyClient) Update() *ApiKeyUpdate { + mutation := newApiKeyMutation(c.config, OpUpdate) + return &ApiKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ApiKeyClient) UpdateOne(_m *ApiKey) *ApiKeyUpdateOne { + mutation := newApiKeyMutation(c.config, OpUpdateOne, withApiKey(_m)) + return &ApiKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ApiKeyClient) UpdateOneID(id int64) *ApiKeyUpdateOne { + mutation := newApiKeyMutation(c.config, OpUpdateOne, withApiKeyID(id)) + return &ApiKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ApiKey. +func (c *ApiKeyClient) Delete() *ApiKeyDelete { + mutation := newApiKeyMutation(c.config, OpDelete) + return &ApiKeyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *ApiKeyClient) DeleteOne(_m *ApiKey) *ApiKeyDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ApiKeyClient) DeleteOneID(id int64) *ApiKeyDeleteOne { + builder := c.Delete().Where(apikey.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ApiKeyDeleteOne{builder} +} + +// Query returns a query builder for ApiKey. +func (c *ApiKeyClient) Query() *ApiKeyQuery { + return &ApiKeyQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeApiKey}, + inters: c.Interceptors(), + } +} + +// Get returns a ApiKey entity by its id. +func (c *ApiKeyClient) Get(ctx context.Context, id int64) (*ApiKey, error) { + return c.Query().Where(apikey.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ApiKeyClient) GetX(ctx context.Context, id int64) *ApiKey { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a ApiKey. +func (c *ApiKeyClient) QueryUser(_m *ApiKey) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a ApiKey. 
+func (c *ApiKeyClient) QueryGroup(_m *ApiKey) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ApiKeyClient) Hooks() []Hook { + hooks := c.hooks.ApiKey + return append(hooks[:len(hooks):len(hooks)], apikey.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *ApiKeyClient) Interceptors() []Interceptor { + inters := c.inters.ApiKey + return append(inters[:len(inters):len(inters)], apikey.Interceptors[:]...) +} + +func (c *ApiKeyClient) mutate(ctx context.Context, m *ApiKeyMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ApiKeyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ApiKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ApiKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ApiKeyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ApiKey mutation op: %q", m.Op()) + } +} + +// GroupClient is a client for the Group schema. +type GroupClient struct { + config +} + +// NewGroupClient returns a client for the Group from the given config. +func NewGroupClient(c config) *GroupClient { + return &GroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `group.Hooks(f(g(h())))`. +func (c *GroupClient) Use(hooks ...Hook) { + c.hooks.Group = append(c.hooks.Group, hooks...) 
+} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `group.Intercept(f(g(h())))`. +func (c *GroupClient) Intercept(interceptors ...Interceptor) { + c.inters.Group = append(c.inters.Group, interceptors...) +} + +// Create returns a builder for creating a Group entity. +func (c *GroupClient) Create() *GroupCreate { + mutation := newGroupMutation(c.config, OpCreate) + return &GroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Group entities. +func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk { + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Group. +func (c *GroupClient) Update() *GroupUpdate { + mutation := newGroupMutation(c.config, OpUpdate) + return &GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *GroupClient) UpdateOne(_m *Group) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(_m)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *GroupClient) UpdateOneID(id int64) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroupID(id)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Group. +func (c *GroupClient) Delete() *GroupDelete { + mutation := newGroupMutation(c.config, OpDelete) + return &GroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *GroupClient) DeleteOne(_m *Group) *GroupDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *GroupClient) DeleteOneID(id int64) *GroupDeleteOne { + builder := c.Delete().Where(group.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &GroupDeleteOne{builder} +} + +// Query returns a query builder for Group. +func (c *GroupClient) Query() *GroupQuery { + return &GroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeGroup}, + inters: c.Interceptors(), + } +} + +// Get returns a Group entity by its id. +func (c *GroupClient) Get(ctx context.Context, id int64) (*Group, error) { + return c.Query().Where(group.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *GroupClient) GetX(ctx context.Context, id int64) *Group { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAPIKeys queries the api_keys edge of a Group. 
+func (c *GroupClient) QueryAPIKeys(_m *Group) *ApiKeyQuery { + query := (&ApiKeyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.APIKeysTable, group.APIKeysColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryRedeemCodes queries the redeem_codes edge of a Group. +func (c *GroupClient) QueryRedeemCodes(_m *Group) *RedeemCodeQuery { + query := (&RedeemCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.RedeemCodesTable, group.RedeemCodesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySubscriptions queries the subscriptions edge of a Group. +func (c *GroupClient) QuerySubscriptions(_m *Group) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.SubscriptionsTable, group.SubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccounts queries the accounts edge of a Group. 
+func (c *GroupClient) QueryAccounts(_m *Group) *AccountQuery { + query := (&AccountClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AccountsTable, group.AccountsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAllowedUsers queries the allowed_users edge of a Group. +func (c *GroupClient) QueryAllowedUsers(_m *Group) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AllowedUsersTable, group.AllowedUsersPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccountGroups queries the account_groups edge of a Group. +func (c *GroupClient) QueryAccountGroups(_m *Group) *AccountGroupQuery { + query := (&AccountGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(accountgroup.Table, accountgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.AccountGroupsTable, group.AccountGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUserAllowedGroups queries the user_allowed_groups edge of a Group. 
+func (c *GroupClient) QueryUserAllowedGroups(_m *Group) *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.UserAllowedGroupsTable, group.UserAllowedGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *GroupClient) Hooks() []Hook { + hooks := c.hooks.Group + return append(hooks[:len(hooks):len(hooks)], group.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *GroupClient) Interceptors() []Interceptor { + inters := c.inters.Group + return append(inters[:len(inters):len(inters)], group.Interceptors[:]...) +} + +func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&GroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&GroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Group mutation op: %q", m.Op()) + } +} + +// ProxyClient is a client for the Proxy schema. +type ProxyClient struct { + config +} + +// NewProxyClient returns a client for the Proxy from the given config. +func NewProxyClient(c config) *ProxyClient { + return &ProxyClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `proxy.Hooks(f(g(h())))`. 
+func (c *ProxyClient) Use(hooks ...Hook) { + c.hooks.Proxy = append(c.hooks.Proxy, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `proxy.Intercept(f(g(h())))`. +func (c *ProxyClient) Intercept(interceptors ...Interceptor) { + c.inters.Proxy = append(c.inters.Proxy, interceptors...) +} + +// Create returns a builder for creating a Proxy entity. +func (c *ProxyClient) Create() *ProxyCreate { + mutation := newProxyMutation(c.config, OpCreate) + return &ProxyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Proxy entities. +func (c *ProxyClient) CreateBulk(builders ...*ProxyCreate) *ProxyCreateBulk { + return &ProxyCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ProxyClient) MapCreateBulk(slice any, setFunc func(*ProxyCreate, int)) *ProxyCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ProxyCreateBulk{err: fmt.Errorf("calling to ProxyClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ProxyCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ProxyCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Proxy. +func (c *ProxyClient) Update() *ProxyUpdate { + mutation := newProxyMutation(c.config, OpUpdate) + return &ProxyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *ProxyClient) UpdateOne(_m *Proxy) *ProxyUpdateOne { + mutation := newProxyMutation(c.config, OpUpdateOne, withProxy(_m)) + return &ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ProxyClient) UpdateOneID(id int64) *ProxyUpdateOne { + mutation := newProxyMutation(c.config, OpUpdateOne, withProxyID(id)) + return &ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Proxy. +func (c *ProxyClient) Delete() *ProxyDelete { + mutation := newProxyMutation(c.config, OpDelete) + return &ProxyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ProxyClient) DeleteOne(_m *Proxy) *ProxyDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ProxyClient) DeleteOneID(id int64) *ProxyDeleteOne { + builder := c.Delete().Where(proxy.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ProxyDeleteOne{builder} +} + +// Query returns a query builder for Proxy. +func (c *ProxyClient) Query() *ProxyQuery { + return &ProxyQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeProxy}, + inters: c.Interceptors(), + } +} + +// Get returns a Proxy entity by its id. +func (c *ProxyClient) Get(ctx context.Context, id int64) (*Proxy, error) { + return c.Query().Where(proxy.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ProxyClient) GetX(ctx context.Context, id int64) *Proxy { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *ProxyClient) Hooks() []Hook { + hooks := c.hooks.Proxy + return append(hooks[:len(hooks):len(hooks)], proxy.Hooks[:]...) +} + +// Interceptors returns the client interceptors. 
+func (c *ProxyClient) Interceptors() []Interceptor { + inters := c.inters.Proxy + return append(inters[:len(inters):len(inters)], proxy.Interceptors[:]...) +} + +func (c *ProxyClient) mutate(ctx context.Context, m *ProxyMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ProxyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ProxyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ProxyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Proxy mutation op: %q", m.Op()) + } +} + +// RedeemCodeClient is a client for the RedeemCode schema. +type RedeemCodeClient struct { + config +} + +// NewRedeemCodeClient returns a client for the RedeemCode from the given config. +func NewRedeemCodeClient(c config) *RedeemCodeClient { + return &RedeemCodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `redeemcode.Hooks(f(g(h())))`. +func (c *RedeemCodeClient) Use(hooks ...Hook) { + c.hooks.RedeemCode = append(c.hooks.RedeemCode, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `redeemcode.Intercept(f(g(h())))`. +func (c *RedeemCodeClient) Intercept(interceptors ...Interceptor) { + c.inters.RedeemCode = append(c.inters.RedeemCode, interceptors...) +} + +// Create returns a builder for creating a RedeemCode entity. +func (c *RedeemCodeClient) Create() *RedeemCodeCreate { + mutation := newRedeemCodeMutation(c.config, OpCreate) + return &RedeemCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of RedeemCode entities. 
+func (c *RedeemCodeClient) CreateBulk(builders ...*RedeemCodeCreate) *RedeemCodeCreateBulk { + return &RedeemCodeCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *RedeemCodeClient) MapCreateBulk(slice any, setFunc func(*RedeemCodeCreate, int)) *RedeemCodeCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &RedeemCodeCreateBulk{err: fmt.Errorf("calling to RedeemCodeClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*RedeemCodeCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &RedeemCodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for RedeemCode. +func (c *RedeemCodeClient) Update() *RedeemCodeUpdate { + mutation := newRedeemCodeMutation(c.config, OpUpdate) + return &RedeemCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *RedeemCodeClient) UpdateOne(_m *RedeemCode) *RedeemCodeUpdateOne { + mutation := newRedeemCodeMutation(c.config, OpUpdateOne, withRedeemCode(_m)) + return &RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *RedeemCodeClient) UpdateOneID(id int64) *RedeemCodeUpdateOne { + mutation := newRedeemCodeMutation(c.config, OpUpdateOne, withRedeemCodeID(id)) + return &RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for RedeemCode. 
+func (c *RedeemCodeClient) Delete() *RedeemCodeDelete { + mutation := newRedeemCodeMutation(c.config, OpDelete) + return &RedeemCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *RedeemCodeClient) DeleteOne(_m *RedeemCode) *RedeemCodeDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *RedeemCodeClient) DeleteOneID(id int64) *RedeemCodeDeleteOne { + builder := c.Delete().Where(redeemcode.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &RedeemCodeDeleteOne{builder} +} + +// Query returns a query builder for RedeemCode. +func (c *RedeemCodeClient) Query() *RedeemCodeQuery { + return &RedeemCodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeRedeemCode}, + inters: c.Interceptors(), + } +} + +// Get returns a RedeemCode entity by its id. +func (c *RedeemCodeClient) Get(ctx context.Context, id int64) (*RedeemCode, error) { + return c.Query().Where(redeemcode.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *RedeemCodeClient) GetX(ctx context.Context, id int64) *RedeemCode { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a RedeemCode. +func (c *RedeemCodeClient) QueryUser(_m *RedeemCode) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.UserTable, redeemcode.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a RedeemCode. 
+func (c *RedeemCodeClient) QueryGroup(_m *RedeemCode) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.GroupTable, redeemcode.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *RedeemCodeClient) Hooks() []Hook { + return c.hooks.RedeemCode +} + +// Interceptors returns the client interceptors. +func (c *RedeemCodeClient) Interceptors() []Interceptor { + return c.inters.RedeemCode +} + +func (c *RedeemCodeClient) mutate(ctx context.Context, m *RedeemCodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&RedeemCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&RedeemCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&RedeemCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown RedeemCode mutation op: %q", m.Op()) + } +} + +// SettingClient is a client for the Setting schema. +type SettingClient struct { + config +} + +// NewSettingClient returns a client for the Setting from the given config. +func NewSettingClient(c config) *SettingClient { + return &SettingClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `setting.Hooks(f(g(h())))`. +func (c *SettingClient) Use(hooks ...Hook) { + c.hooks.Setting = append(c.hooks.Setting, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `setting.Intercept(f(g(h())))`. +func (c *SettingClient) Intercept(interceptors ...Interceptor) { + c.inters.Setting = append(c.inters.Setting, interceptors...) +} + +// Create returns a builder for creating a Setting entity. +func (c *SettingClient) Create() *SettingCreate { + mutation := newSettingMutation(c.config, OpCreate) + return &SettingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Setting entities. +func (c *SettingClient) CreateBulk(builders ...*SettingCreate) *SettingCreateBulk { + return &SettingCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *SettingClient) MapCreateBulk(slice any, setFunc func(*SettingCreate, int)) *SettingCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &SettingCreateBulk{err: fmt.Errorf("calling to SettingClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*SettingCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &SettingCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Setting. +func (c *SettingClient) Update() *SettingUpdate { + mutation := newSettingMutation(c.config, OpUpdate) + return &SettingUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *SettingClient) UpdateOne(_m *Setting) *SettingUpdateOne { + mutation := newSettingMutation(c.config, OpUpdateOne, withSetting(_m)) + return &SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *SettingClient) UpdateOneID(id int64) *SettingUpdateOne { + mutation := newSettingMutation(c.config, OpUpdateOne, withSettingID(id)) + return &SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Setting. +func (c *SettingClient) Delete() *SettingDelete { + mutation := newSettingMutation(c.config, OpDelete) + return &SettingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *SettingClient) DeleteOne(_m *Setting) *SettingDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *SettingClient) DeleteOneID(id int64) *SettingDeleteOne { + builder := c.Delete().Where(setting.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &SettingDeleteOne{builder} +} + +// Query returns a query builder for Setting. +func (c *SettingClient) Query() *SettingQuery { + return &SettingQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeSetting}, + inters: c.Interceptors(), + } +} + +// Get returns a Setting entity by its id. +func (c *SettingClient) Get(ctx context.Context, id int64) (*Setting, error) { + return c.Query().Where(setting.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *SettingClient) GetX(ctx context.Context, id int64) *Setting { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *SettingClient) Hooks() []Hook { + return c.hooks.Setting +} + +// Interceptors returns the client interceptors. 
+func (c *SettingClient) Interceptors() []Interceptor { + return c.inters.Setting +} + +func (c *SettingClient) mutate(ctx context.Context, m *SettingMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&SettingCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&SettingUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&SettingDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Setting mutation op: %q", m.Op()) + } +} + +// UserClient is a client for the User schema. +type UserClient struct { + config +} + +// NewUserClient returns a client for the User from the given config. +func NewUserClient(c config) *UserClient { + return &UserClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`. +func (c *UserClient) Use(hooks ...Hook) { + c.hooks.User = append(c.hooks.User, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. +func (c *UserClient) Create() *UserCreate { + mutation := newUserMutation(c.config, OpCreate) + return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of User entities. +func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { + return &UserCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. 
For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for User. +func (c *UserClient) Update() *UserUpdate { + mutation := newUserMutation(c.config, OpUpdate) + return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserClient) UpdateOne(_m *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(_m)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserClient) UpdateOneID(id int64) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for User. +func (c *UserClient) Delete() *UserDelete { + mutation := newUserMutation(c.config, OpDelete) + return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserClient) DeleteOne(_m *User) *UserDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *UserClient) DeleteOneID(id int64) *UserDeleteOne { + builder := c.Delete().Where(user.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserDeleteOne{builder} +} + +// Query returns a query builder for User. +func (c *UserClient) Query() *UserQuery { + return &UserQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), + } +} + +// Get returns a User entity by its id. +func (c *UserClient) Get(ctx context.Context, id int64) (*User, error) { + return c.Query().Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserClient) GetX(ctx context.Context, id int64) *User { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAPIKeys queries the api_keys edge of a User. +func (c *UserClient) QueryAPIKeys(_m *User) *ApiKeyQuery { + query := (&ApiKeyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.APIKeysTable, user.APIKeysColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryRedeemCodes queries the redeem_codes edge of a User. +func (c *UserClient) QueryRedeemCodes(_m *User) *RedeemCodeQuery { + query := (&RedeemCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.RedeemCodesTable, user.RedeemCodesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySubscriptions queries the subscriptions edge of a User. 
+func (c *UserClient) QuerySubscriptions(_m *User) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.SubscriptionsTable, user.SubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAssignedSubscriptions queries the assigned_subscriptions edge of a User. +func (c *UserClient) QueryAssignedSubscriptions(_m *User) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AssignedSubscriptionsTable, user.AssignedSubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAllowedGroups queries the allowed_groups edge of a User. +func (c *UserClient) QueryAllowedGroups(_m *User) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, user.AllowedGroupsTable, user.AllowedGroupsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUserAllowedGroups queries the user_allowed_groups edge of a User. 
+func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.UserColumn), + sqlgraph.Edge(sqlgraph.O2M, true, user.UserAllowedGroupsTable, user.UserAllowedGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserClient) Hooks() []Hook { + hooks := c.hooks.User + return append(hooks[:len(hooks):len(hooks)], user.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *UserClient) Interceptors() []Interceptor { + inters := c.inters.User + return append(inters[:len(inters):len(inters)], user.Interceptors[:]...) +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// UserAllowedGroupClient is a client for the UserAllowedGroup schema. +type UserAllowedGroupClient struct { + config +} + +// NewUserAllowedGroupClient returns a client for the UserAllowedGroup from the given config. +func NewUserAllowedGroupClient(c config) *UserAllowedGroupClient { + return &UserAllowedGroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. 
+// A call to `Use(f, g, h)` equals to `userallowedgroup.Hooks(f(g(h())))`. +func (c *UserAllowedGroupClient) Use(hooks ...Hook) { + c.hooks.UserAllowedGroup = append(c.hooks.UserAllowedGroup, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userallowedgroup.Intercept(f(g(h())))`. +func (c *UserAllowedGroupClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAllowedGroup = append(c.inters.UserAllowedGroup, interceptors...) +} + +// Create returns a builder for creating a UserAllowedGroup entity. +func (c *UserAllowedGroupClient) Create() *UserAllowedGroupCreate { + mutation := newUserAllowedGroupMutation(c.config, OpCreate) + return &UserAllowedGroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAllowedGroup entities. +func (c *UserAllowedGroupClient) CreateBulk(builders ...*UserAllowedGroupCreate) *UserAllowedGroupCreateBulk { + return &UserAllowedGroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserAllowedGroupClient) MapCreateBulk(slice any, setFunc func(*UserAllowedGroupCreate, int)) *UserAllowedGroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAllowedGroupCreateBulk{err: fmt.Errorf("calling to UserAllowedGroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAllowedGroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAllowedGroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAllowedGroup. 
+func (c *UserAllowedGroupClient) Update() *UserAllowedGroupUpdate { + mutation := newUserAllowedGroupMutation(c.config, OpUpdate) + return &UserAllowedGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserAllowedGroupClient) UpdateOne(_m *UserAllowedGroup) *UserAllowedGroupUpdateOne { + mutation := newUserAllowedGroupMutation(c.config, OpUpdateOne) + mutation.user = &_m.UserID + mutation.group = &_m.GroupID + return &UserAllowedGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAllowedGroup. +func (c *UserAllowedGroupClient) Delete() *UserAllowedGroupDelete { + mutation := newUserAllowedGroupMutation(c.config, OpDelete) + return &UserAllowedGroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Query returns a query builder for UserAllowedGroup. +func (c *UserAllowedGroupClient) Query() *UserAllowedGroupQuery { + return &UserAllowedGroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAllowedGroup}, + inters: c.Interceptors(), + } +} + +// QueryUser queries the user edge of a UserAllowedGroup. +func (c *UserAllowedGroupClient) QueryUser(_m *UserAllowedGroup) *UserQuery { + return c.Query(). + Where(userallowedgroup.UserID(_m.UserID), userallowedgroup.GroupID(_m.GroupID)). + QueryUser() +} + +// QueryGroup queries the group edge of a UserAllowedGroup. +func (c *UserAllowedGroupClient) QueryGroup(_m *UserAllowedGroup) *GroupQuery { + return c.Query(). + Where(userallowedgroup.UserID(_m.UserID), userallowedgroup.GroupID(_m.GroupID)). + QueryGroup() +} + +// Hooks returns the client hooks. +func (c *UserAllowedGroupClient) Hooks() []Hook { + return c.hooks.UserAllowedGroup +} + +// Interceptors returns the client interceptors. 
+func (c *UserAllowedGroupClient) Interceptors() []Interceptor { + return c.inters.UserAllowedGroup +} + +func (c *UserAllowedGroupClient) mutate(ctx context.Context, m *UserAllowedGroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAllowedGroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAllowedGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAllowedGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAllowedGroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAllowedGroup mutation op: %q", m.Op()) + } +} + +// UserSubscriptionClient is a client for the UserSubscription schema. +type UserSubscriptionClient struct { + config +} + +// NewUserSubscriptionClient returns a client for the UserSubscription from the given config. +func NewUserSubscriptionClient(c config) *UserSubscriptionClient { + return &UserSubscriptionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `usersubscription.Hooks(f(g(h())))`. +func (c *UserSubscriptionClient) Use(hooks ...Hook) { + c.hooks.UserSubscription = append(c.hooks.UserSubscription, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `usersubscription.Intercept(f(g(h())))`. +func (c *UserSubscriptionClient) Intercept(interceptors ...Interceptor) { + c.inters.UserSubscription = append(c.inters.UserSubscription, interceptors...) +} + +// Create returns a builder for creating a UserSubscription entity. 
+func (c *UserSubscriptionClient) Create() *UserSubscriptionCreate { + mutation := newUserSubscriptionMutation(c.config, OpCreate) + return &UserSubscriptionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserSubscription entities. +func (c *UserSubscriptionClient) CreateBulk(builders ...*UserSubscriptionCreate) *UserSubscriptionCreateBulk { + return &UserSubscriptionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserSubscriptionClient) MapCreateBulk(slice any, setFunc func(*UserSubscriptionCreate, int)) *UserSubscriptionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserSubscriptionCreateBulk{err: fmt.Errorf("calling to UserSubscriptionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserSubscriptionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserSubscriptionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserSubscription. +func (c *UserSubscriptionClient) Update() *UserSubscriptionUpdate { + mutation := newUserSubscriptionMutation(c.config, OpUpdate) + return &UserSubscriptionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserSubscriptionClient) UpdateOne(_m *UserSubscription) *UserSubscriptionUpdateOne { + mutation := newUserSubscriptionMutation(c.config, OpUpdateOne, withUserSubscription(_m)) + return &UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *UserSubscriptionClient) UpdateOneID(id int64) *UserSubscriptionUpdateOne { + mutation := newUserSubscriptionMutation(c.config, OpUpdateOne, withUserSubscriptionID(id)) + return &UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserSubscription. +func (c *UserSubscriptionClient) Delete() *UserSubscriptionDelete { + mutation := newUserSubscriptionMutation(c.config, OpDelete) + return &UserSubscriptionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserSubscriptionClient) DeleteOne(_m *UserSubscription) *UserSubscriptionDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserSubscriptionClient) DeleteOneID(id int64) *UserSubscriptionDeleteOne { + builder := c.Delete().Where(usersubscription.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserSubscriptionDeleteOne{builder} +} + +// Query returns a query builder for UserSubscription. +func (c *UserSubscriptionClient) Query() *UserSubscriptionQuery { + return &UserSubscriptionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserSubscription}, + inters: c.Interceptors(), + } +} + +// Get returns a UserSubscription entity by its id. +func (c *UserSubscriptionClient) Get(ctx context.Context, id int64) (*UserSubscription, error) { + return c.Query().Where(usersubscription.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserSubscriptionClient) GetX(ctx context.Context, id int64) *UserSubscription { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a UserSubscription. 
+func (c *UserSubscriptionClient) QueryUser(_m *UserSubscription) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.UserTable, usersubscription.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a UserSubscription. +func (c *UserSubscriptionClient) QueryGroup(_m *UserSubscription) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.GroupTable, usersubscription.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAssignedByUser queries the assigned_by_user edge of a UserSubscription. +func (c *UserSubscriptionClient) QueryAssignedByUser(_m *UserSubscription) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.AssignedByUserTable, usersubscription.AssignedByUserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *UserSubscriptionClient) Hooks() []Hook { + return c.hooks.UserSubscription +} + +// Interceptors returns the client interceptors. +func (c *UserSubscriptionClient) Interceptors() []Interceptor { + return c.inters.UserSubscription +} + +func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscriptionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserSubscriptionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserSubscriptionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserSubscriptionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserSubscription mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Account, AccountGroup, ApiKey, Group, Proxy, RedeemCode, Setting, User, + UserAllowedGroup, UserSubscription []ent.Hook + } + inters struct { + Account, AccountGroup, ApiKey, Group, Proxy, RedeemCode, Setting, User, + UserAllowedGroup, UserSubscription []ent.Interceptor + } +) diff --git a/backend/ent/driver_access.go b/backend/ent/driver_access.go new file mode 100644 index 00000000..b0693572 --- /dev/null +++ b/backend/ent/driver_access.go @@ -0,0 +1,8 @@ +package ent + +import "entgo.io/ent/dialect" + +// Driver 暴露底层 driver,供需要 raw SQL 的集成层使用。 +func (c *Client) Driver() dialect.Driver { + return c.driver +} diff --git a/backend/ent/ent.go b/backend/ent/ent.go new file mode 100644 index 00000000..e2c8b56c --- /dev/null +++ b/backend/ent/ent.go @@ -0,0 +1,626 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. 
+// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(t, c string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + account.Table: account.ValidColumn, + accountgroup.Table: accountgroup.ValidColumn, + apikey.Table: apikey.ValidColumn, + group.Table: group.ValidColumn, + proxy.Table: proxy.ValidColumn, + redeemcode.Table: redeemcode.ValidColumn, + setting.Table: setting.ValidColumn, + user.Table: user.ValidColumn, + userallowedgroup.Table: userallowedgroup.ValidColumn, + usersubscription.Table: usersubscription.ValidColumn, + }) + }) + return columnCheck(t, c) +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). 
+// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. 
+func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. 
+func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. 
+func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. 
+func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/backend/ent/enttest/enttest.go b/backend/ent/enttest/enttest.go new file mode 100644 index 00000000..fbeace40 --- /dev/null +++ b/backend/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/ent" + // required by schema hooks. + _ "github.com/Wei-Shaw/sub2api/ent/runtime" + + "entgo.io/ent/dialect/sql/schema" + "github.com/Wei-Shaw/sub2api/ent/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. 
+ Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/backend/ent/generate.go b/backend/ent/generate.go new file mode 100644 index 00000000..f2165ed8 --- /dev/null +++ b/backend/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept --idtype int64 ./schema diff --git a/backend/ent/group.go b/backend/ent/group.go new file mode 100644 index 00000000..fecb202a --- /dev/null +++ b/backend/ent/group.go @@ -0,0 +1,379 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" +) + +// Group is the model entity for the Group schema. +type Group struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description *string `json:"description,omitempty"` + // RateMultiplier holds the value of the "rate_multiplier" field. + RateMultiplier float64 `json:"rate_multiplier,omitempty"` + // IsExclusive holds the value of the "is_exclusive" field. + IsExclusive bool `json:"is_exclusive,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Platform holds the value of the "platform" field. + Platform string `json:"platform,omitempty"` + // SubscriptionType holds the value of the "subscription_type" field. + SubscriptionType string `json:"subscription_type,omitempty"` + // DailyLimitUsd holds the value of the "daily_limit_usd" field. + DailyLimitUsd *float64 `json:"daily_limit_usd,omitempty"` + // WeeklyLimitUsd holds the value of the "weekly_limit_usd" field. + WeeklyLimitUsd *float64 `json:"weekly_limit_usd,omitempty"` + // MonthlyLimitUsd holds the value of the "monthly_limit_usd" field. + MonthlyLimitUsd *float64 `json:"monthly_limit_usd,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the GroupQuery when eager-loading is set. 
+ Edges GroupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// GroupEdges holds the relations/edges for other nodes in the graph. +type GroupEdges struct { + // APIKeys holds the value of the api_keys edge. + APIKeys []*ApiKey `json:"api_keys,omitempty"` + // RedeemCodes holds the value of the redeem_codes edge. + RedeemCodes []*RedeemCode `json:"redeem_codes,omitempty"` + // Subscriptions holds the value of the subscriptions edge. + Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` + // Accounts holds the value of the accounts edge. + Accounts []*Account `json:"accounts,omitempty"` + // AllowedUsers holds the value of the allowed_users edge. + AllowedUsers []*User `json:"allowed_users,omitempty"` + // AccountGroups holds the value of the account_groups edge. + AccountGroups []*AccountGroup `json:"account_groups,omitempty"` + // UserAllowedGroups holds the value of the user_allowed_groups edge. + UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [7]bool +} + +// APIKeysOrErr returns the APIKeys value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) APIKeysOrErr() ([]*ApiKey, error) { + if e.loadedTypes[0] { + return e.APIKeys, nil + } + return nil, &NotLoadedError{edge: "api_keys"} +} + +// RedeemCodesOrErr returns the RedeemCodes value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) RedeemCodesOrErr() ([]*RedeemCode, error) { + if e.loadedTypes[1] { + return e.RedeemCodes, nil + } + return nil, &NotLoadedError{edge: "redeem_codes"} +} + +// SubscriptionsOrErr returns the Subscriptions value or an error if the edge +// was not loaded in eager-loading. 
+func (e GroupEdges) SubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[2] { + return e.Subscriptions, nil + } + return nil, &NotLoadedError{edge: "subscriptions"} +} + +// AccountsOrErr returns the Accounts value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AccountsOrErr() ([]*Account, error) { + if e.loadedTypes[3] { + return e.Accounts, nil + } + return nil, &NotLoadedError{edge: "accounts"} +} + +// AllowedUsersOrErr returns the AllowedUsers value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AllowedUsersOrErr() ([]*User, error) { + if e.loadedTypes[4] { + return e.AllowedUsers, nil + } + return nil, &NotLoadedError{edge: "allowed_users"} +} + +// AccountGroupsOrErr returns the AccountGroups value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AccountGroupsOrErr() ([]*AccountGroup, error) { + if e.loadedTypes[5] { + return e.AccountGroups, nil + } + return nil, &NotLoadedError{edge: "account_groups"} +} + +// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { + if e.loadedTypes[6] { + return e.UserAllowedGroups, nil + } + return nil, &NotLoadedError{edge: "user_allowed_groups"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Group) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case group.FieldIsExclusive: + values[i] = new(sql.NullBool) + case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd: + values[i] = new(sql.NullFloat64) + case group.FieldID: + values[i] = new(sql.NullInt64) + case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType: + values[i] = new(sql.NullString) + case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Group fields. +func (_m *Group) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case group.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case group.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case group.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case group.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case group.FieldName: + if value, ok := 
values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case group.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + _m.Description = new(string) + *_m.Description = value.String + } + case group.FieldRateMultiplier: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i]) + } else if value.Valid { + _m.RateMultiplier = value.Float64 + } + case group.FieldIsExclusive: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field is_exclusive", values[i]) + } else if value.Valid { + _m.IsExclusive = value.Bool + } + case group.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case group.FieldPlatform: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field platform", values[i]) + } else if value.Valid { + _m.Platform = value.String + } + case group.FieldSubscriptionType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field subscription_type", values[i]) + } else if value.Valid { + _m.SubscriptionType = value.String + } + case group.FieldDailyLimitUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field daily_limit_usd", values[i]) + } else if value.Valid { + _m.DailyLimitUsd = new(float64) + *_m.DailyLimitUsd = value.Float64 + } + case group.FieldWeeklyLimitUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field weekly_limit_usd", values[i]) + } else if value.Valid { + _m.WeeklyLimitUsd = 
new(float64) + *_m.WeeklyLimitUsd = value.Float64 + } + case group.FieldMonthlyLimitUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field monthly_limit_usd", values[i]) + } else if value.Valid { + _m.MonthlyLimitUsd = new(float64) + *_m.MonthlyLimitUsd = value.Float64 + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Group. +// This includes values selected through modifiers, order, etc. +func (_m *Group) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAPIKeys queries the "api_keys" edge of the Group entity. +func (_m *Group) QueryAPIKeys() *ApiKeyQuery { + return NewGroupClient(_m.config).QueryAPIKeys(_m) +} + +// QueryRedeemCodes queries the "redeem_codes" edge of the Group entity. +func (_m *Group) QueryRedeemCodes() *RedeemCodeQuery { + return NewGroupClient(_m.config).QueryRedeemCodes(_m) +} + +// QuerySubscriptions queries the "subscriptions" edge of the Group entity. +func (_m *Group) QuerySubscriptions() *UserSubscriptionQuery { + return NewGroupClient(_m.config).QuerySubscriptions(_m) +} + +// QueryAccounts queries the "accounts" edge of the Group entity. +func (_m *Group) QueryAccounts() *AccountQuery { + return NewGroupClient(_m.config).QueryAccounts(_m) +} + +// QueryAllowedUsers queries the "allowed_users" edge of the Group entity. +func (_m *Group) QueryAllowedUsers() *UserQuery { + return NewGroupClient(_m.config).QueryAllowedUsers(_m) +} + +// QueryAccountGroups queries the "account_groups" edge of the Group entity. +func (_m *Group) QueryAccountGroups() *AccountGroupQuery { + return NewGroupClient(_m.config).QueryAccountGroups(_m) +} + +// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the Group entity. 
+func (_m *Group) QueryUserAllowedGroups() *UserAllowedGroupQuery { + return NewGroupClient(_m.config).QueryUserAllowedGroups(_m) +} + +// Update returns a builder for updating this Group. +// Note that you need to call Group.Unwrap() before calling this method if this Group +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Group) Update() *GroupUpdateOne { + return NewGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Group entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Group) Unwrap() *Group { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Group is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Group) String() string { + var builder strings.Builder + builder.WriteString("Group(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + if v := _m.Description; v != nil { + builder.WriteString("description=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier)) + builder.WriteString(", ") + builder.WriteString("is_exclusive=") + builder.WriteString(fmt.Sprintf("%v", _m.IsExclusive)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + 
builder.WriteString("platform=") + builder.WriteString(_m.Platform) + builder.WriteString(", ") + builder.WriteString("subscription_type=") + builder.WriteString(_m.SubscriptionType) + builder.WriteString(", ") + if v := _m.DailyLimitUsd; v != nil { + builder.WriteString("daily_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.WeeklyLimitUsd; v != nil { + builder.WriteString("weekly_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.MonthlyLimitUsd; v != nil { + builder.WriteString("monthly_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteByte(')') + return builder.String() +} + +// Groups is a parsable slice of Group. +type Groups []*Group diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go new file mode 100644 index 00000000..05a5673d --- /dev/null +++ b/backend/ent/group/group.go @@ -0,0 +1,396 @@ +// Code generated by ent, DO NOT EDIT. + +package group + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the group type in the database. + Label = "group" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database. 
+ FieldRateMultiplier = "rate_multiplier" + // FieldIsExclusive holds the string denoting the is_exclusive field in the database. + FieldIsExclusive = "is_exclusive" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldPlatform holds the string denoting the platform field in the database. + FieldPlatform = "platform" + // FieldSubscriptionType holds the string denoting the subscription_type field in the database. + FieldSubscriptionType = "subscription_type" + // FieldDailyLimitUsd holds the string denoting the daily_limit_usd field in the database. + FieldDailyLimitUsd = "daily_limit_usd" + // FieldWeeklyLimitUsd holds the string denoting the weekly_limit_usd field in the database. + FieldWeeklyLimitUsd = "weekly_limit_usd" + // FieldMonthlyLimitUsd holds the string denoting the monthly_limit_usd field in the database. + FieldMonthlyLimitUsd = "monthly_limit_usd" + // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. + EdgeAPIKeys = "api_keys" + // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. + EdgeRedeemCodes = "redeem_codes" + // EdgeSubscriptions holds the string denoting the subscriptions edge name in mutations. + EdgeSubscriptions = "subscriptions" + // EdgeAccounts holds the string denoting the accounts edge name in mutations. + EdgeAccounts = "accounts" + // EdgeAllowedUsers holds the string denoting the allowed_users edge name in mutations. + EdgeAllowedUsers = "allowed_users" + // EdgeAccountGroups holds the string denoting the account_groups edge name in mutations. + EdgeAccountGroups = "account_groups" + // EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations. + EdgeUserAllowedGroups = "user_allowed_groups" + // Table holds the table name of the group in the database. + Table = "groups" + // APIKeysTable is the table that holds the api_keys relation/edge. 
+ APIKeysTable = "api_keys" + // APIKeysInverseTable is the table name for the ApiKey entity. + // It exists in this package in order to avoid circular dependency with the "apikey" package. + APIKeysInverseTable = "api_keys" + // APIKeysColumn is the table column denoting the api_keys relation/edge. + APIKeysColumn = "group_id" + // RedeemCodesTable is the table that holds the redeem_codes relation/edge. + RedeemCodesTable = "redeem_codes" + // RedeemCodesInverseTable is the table name for the RedeemCode entity. + // It exists in this package in order to avoid circular dependency with the "redeemcode" package. + RedeemCodesInverseTable = "redeem_codes" + // RedeemCodesColumn is the table column denoting the redeem_codes relation/edge. + RedeemCodesColumn = "group_id" + // SubscriptionsTable is the table that holds the subscriptions relation/edge. + SubscriptionsTable = "user_subscriptions" + // SubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + SubscriptionsInverseTable = "user_subscriptions" + // SubscriptionsColumn is the table column denoting the subscriptions relation/edge. + SubscriptionsColumn = "group_id" + // AccountsTable is the table that holds the accounts relation/edge. The primary key declared below. + AccountsTable = "account_groups" + // AccountsInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. + AccountsInverseTable = "accounts" + // AllowedUsersTable is the table that holds the allowed_users relation/edge. The primary key declared below. + AllowedUsersTable = "user_allowed_groups" + // AllowedUsersInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. 
+ AllowedUsersInverseTable = "users" + // AccountGroupsTable is the table that holds the account_groups relation/edge. + AccountGroupsTable = "account_groups" + // AccountGroupsInverseTable is the table name for the AccountGroup entity. + // It exists in this package in order to avoid circular dependency with the "accountgroup" package. + AccountGroupsInverseTable = "account_groups" + // AccountGroupsColumn is the table column denoting the account_groups relation/edge. + AccountGroupsColumn = "group_id" + // UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge. + UserAllowedGroupsTable = "user_allowed_groups" + // UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity. + // It exists in this package in order to avoid circular dependency with the "userallowedgroup" package. + UserAllowedGroupsInverseTable = "user_allowed_groups" + // UserAllowedGroupsColumn is the table column denoting the user_allowed_groups relation/edge. + UserAllowedGroupsColumn = "group_id" +) + +// Columns holds all SQL columns for group fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldName, + FieldDescription, + FieldRateMultiplier, + FieldIsExclusive, + FieldStatus, + FieldPlatform, + FieldSubscriptionType, + FieldDailyLimitUsd, + FieldWeeklyLimitUsd, + FieldMonthlyLimitUsd, +} + +var ( + // AccountsPrimaryKey and AccountsColumn2 are the table columns denoting the + // primary key for the accounts relation (M2M). + AccountsPrimaryKey = []string{"account_id", "group_id"} + // AllowedUsersPrimaryKey and AllowedUsersColumn2 are the table columns denoting the + // primary key for the allowed_users relation (M2M). + AllowedUsersPrimaryKey = []string{"user_id", "group_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field. + DefaultRateMultiplier float64 + // DefaultIsExclusive holds the default value on creation for the "is_exclusive" field. + DefaultIsExclusive bool + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultPlatform holds the default value on creation for the "platform" field. + DefaultPlatform string + // PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + PlatformValidator func(string) error + // DefaultSubscriptionType holds the default value on creation for the "subscription_type" field. + DefaultSubscriptionType string + // SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save. 
+ SubscriptionTypeValidator func(string) error +) + +// OrderOption defines the ordering options for the Group queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByRateMultiplier orders the results by the rate_multiplier field. +func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() +} + +// ByIsExclusive orders the results by the is_exclusive field. +func ByIsExclusive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsExclusive, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByPlatform orders the results by the platform field. 
+func ByPlatform(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlatform, opts...).ToFunc() +} + +// BySubscriptionType orders the results by the subscription_type field. +func BySubscriptionType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSubscriptionType, opts...).ToFunc() +} + +// ByDailyLimitUsd orders the results by the daily_limit_usd field. +func ByDailyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyLimitUsd, opts...).ToFunc() +} + +// ByWeeklyLimitUsd orders the results by the weekly_limit_usd field. +func ByWeeklyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyLimitUsd, opts...).ToFunc() +} + +// ByMonthlyLimitUsd orders the results by the monthly_limit_usd field. +func ByMonthlyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyLimitUsd, opts...).ToFunc() +} + +// ByAPIKeysCount orders the results by api_keys count. +func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAPIKeysStep(), opts...) + } +} + +// ByAPIKeys orders the results by api_keys terms. +func ByAPIKeys(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAPIKeysStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByRedeemCodesCount orders the results by redeem_codes count. +func ByRedeemCodesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRedeemCodesStep(), opts...) + } +} + +// ByRedeemCodes orders the results by redeem_codes terms. +func ByRedeemCodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRedeemCodesStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} + +// BySubscriptionsCount orders the results by subscriptions count. +func BySubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newSubscriptionsStep(), opts...) + } +} + +// BySubscriptions orders the results by subscriptions terms. +func BySubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountsCount orders the results by accounts count. +func ByAccountsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountsStep(), opts...) + } +} + +// ByAccounts orders the results by accounts terms. +func ByAccounts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAllowedUsersCount orders the results by allowed_users count. +func ByAllowedUsersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowedUsersStep(), opts...) + } +} + +// ByAllowedUsers orders the results by allowed_users terms. +func ByAllowedUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowedUsersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountGroupsCount orders the results by account_groups count. +func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...) + } +} + +// ByAccountGroups orders the results by account_groups terms. 
+func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUserAllowedGroupsCount orders the results by user_allowed_groups count. +func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserAllowedGroupsStep(), opts...) + } +} + +// ByUserAllowedGroups orders the results by user_allowed_groups terms. +func ByUserAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAPIKeysStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(APIKeysInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) +} +func newRedeemCodesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RedeemCodesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) +} +func newSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) +} +func newAccountsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AccountsTable, AccountsPrimaryKey...), + ) +} +func newAllowedUsersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowedUsersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowedUsersTable, AllowedUsersPrimaryKey...), + ) +} +func newAccountGroupsStep() 
*sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) +} +func newUserAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserAllowedGroupsInverseTable, UserAllowedGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) +} diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go new file mode 100644 index 00000000..fd597be9 --- /dev/null +++ b/backend/ent/group/where.go @@ -0,0 +1,962 @@ +// Code generated by ent, DO NOT EDIT. + +package group + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id int64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ. +func RateMultiplier(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// IsExclusive applies equality check predicate on the "is_exclusive" field. It's identical to IsExclusiveEQ. +func IsExclusive(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldIsExclusive, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. 
+func Status(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldStatus, v)) +} + +// Platform applies equality check predicate on the "platform" field. It's identical to PlatformEQ. +func Platform(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPlatform, v)) +} + +// SubscriptionType applies equality check predicate on the "subscription_type" field. It's identical to SubscriptionTypeEQ. +func SubscriptionType(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSubscriptionType, v)) +} + +// DailyLimitUsd applies equality check predicate on the "daily_limit_usd" field. It's identical to DailyLimitUsdEQ. +func DailyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDailyLimitUsd, v)) +} + +// WeeklyLimitUsd applies equality check predicate on the "weekly_limit_usd" field. It's identical to WeeklyLimitUsdEQ. +func WeeklyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldWeeklyLimitUsd, v)) +} + +// MonthlyLimitUsd applies equality check predicate on the "monthly_limit_usd" field. It's identical to MonthlyLimitUsdEQ. +func MonthlyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMonthlyLimitUsd, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. 
+func DeletedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDeletedAt)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. 
+func NameContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. 
+func DescriptionGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldDescription, v)) +} + +// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field. 
+func RateMultiplierEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field. +func RateMultiplierNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierIn applies the In predicate on the "rate_multiplier" field. +func RateMultiplierIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field. +func RateMultiplierNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field. +func RateMultiplierGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldRateMultiplier, v)) +} + +// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field. +func RateMultiplierGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldRateMultiplier, v)) +} + +// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field. +func RateMultiplierLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldRateMultiplier, v)) +} + +// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field. +func RateMultiplierLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldRateMultiplier, v)) +} + +// IsExclusiveEQ applies the EQ predicate on the "is_exclusive" field. +func IsExclusiveEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldIsExclusive, v)) +} + +// IsExclusiveNEQ applies the NEQ predicate on the "is_exclusive" field. +func IsExclusiveNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldIsExclusive, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. 
+func StatusEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. 
+func StatusEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldStatus, v)) +} + +// PlatformEQ applies the EQ predicate on the "platform" field. +func PlatformEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPlatform, v)) +} + +// PlatformNEQ applies the NEQ predicate on the "platform" field. +func PlatformNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldPlatform, v)) +} + +// PlatformIn applies the In predicate on the "platform" field. +func PlatformIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldPlatform, vs...)) +} + +// PlatformNotIn applies the NotIn predicate on the "platform" field. +func PlatformNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldPlatform, vs...)) +} + +// PlatformGT applies the GT predicate on the "platform" field. +func PlatformGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldPlatform, v)) +} + +// PlatformGTE applies the GTE predicate on the "platform" field. +func PlatformGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldPlatform, v)) +} + +// PlatformLT applies the LT predicate on the "platform" field. +func PlatformLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldPlatform, v)) +} + +// PlatformLTE applies the LTE predicate on the "platform" field. +func PlatformLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldPlatform, v)) +} + +// PlatformContains applies the Contains predicate on the "platform" field. 
+func PlatformContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldPlatform, v)) +} + +// PlatformHasPrefix applies the HasPrefix predicate on the "platform" field. +func PlatformHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldPlatform, v)) +} + +// PlatformHasSuffix applies the HasSuffix predicate on the "platform" field. +func PlatformHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldPlatform, v)) +} + +// PlatformEqualFold applies the EqualFold predicate on the "platform" field. +func PlatformEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldPlatform, v)) +} + +// PlatformContainsFold applies the ContainsFold predicate on the "platform" field. +func PlatformContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldPlatform, v)) +} + +// SubscriptionTypeEQ applies the EQ predicate on the "subscription_type" field. +func SubscriptionTypeEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSubscriptionType, v)) +} + +// SubscriptionTypeNEQ applies the NEQ predicate on the "subscription_type" field. +func SubscriptionTypeNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldSubscriptionType, v)) +} + +// SubscriptionTypeIn applies the In predicate on the "subscription_type" field. +func SubscriptionTypeIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldSubscriptionType, vs...)) +} + +// SubscriptionTypeNotIn applies the NotIn predicate on the "subscription_type" field. +func SubscriptionTypeNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldSubscriptionType, vs...)) +} + +// SubscriptionTypeGT applies the GT predicate on the "subscription_type" field. 
+func SubscriptionTypeGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldSubscriptionType, v)) +} + +// SubscriptionTypeGTE applies the GTE predicate on the "subscription_type" field. +func SubscriptionTypeGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldSubscriptionType, v)) +} + +// SubscriptionTypeLT applies the LT predicate on the "subscription_type" field. +func SubscriptionTypeLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldSubscriptionType, v)) +} + +// SubscriptionTypeLTE applies the LTE predicate on the "subscription_type" field. +func SubscriptionTypeLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldSubscriptionType, v)) +} + +// SubscriptionTypeContains applies the Contains predicate on the "subscription_type" field. +func SubscriptionTypeContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldSubscriptionType, v)) +} + +// SubscriptionTypeHasPrefix applies the HasPrefix predicate on the "subscription_type" field. +func SubscriptionTypeHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldSubscriptionType, v)) +} + +// SubscriptionTypeHasSuffix applies the HasSuffix predicate on the "subscription_type" field. +func SubscriptionTypeHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldSubscriptionType, v)) +} + +// SubscriptionTypeEqualFold applies the EqualFold predicate on the "subscription_type" field. +func SubscriptionTypeEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldSubscriptionType, v)) +} + +// SubscriptionTypeContainsFold applies the ContainsFold predicate on the "subscription_type" field. +func SubscriptionTypeContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldSubscriptionType, v)) +} + +// DailyLimitUsdEQ applies the EQ predicate on the "daily_limit_usd" field. 
+func DailyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdNEQ applies the NEQ predicate on the "daily_limit_usd" field. +func DailyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdIn applies the In predicate on the "daily_limit_usd" field. +func DailyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDailyLimitUsd, vs...)) +} + +// DailyLimitUsdNotIn applies the NotIn predicate on the "daily_limit_usd" field. +func DailyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDailyLimitUsd, vs...)) +} + +// DailyLimitUsdGT applies the GT predicate on the "daily_limit_usd" field. +func DailyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdGTE applies the GTE predicate on the "daily_limit_usd" field. +func DailyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdLT applies the LT predicate on the "daily_limit_usd" field. +func DailyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdLTE applies the LTE predicate on the "daily_limit_usd" field. +func DailyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdIsNil applies the IsNil predicate on the "daily_limit_usd" field. +func DailyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDailyLimitUsd)) +} + +// DailyLimitUsdNotNil applies the NotNil predicate on the "daily_limit_usd" field. +func DailyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDailyLimitUsd)) +} + +// WeeklyLimitUsdEQ applies the EQ predicate on the "weekly_limit_usd" field. 
+func WeeklyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdNEQ applies the NEQ predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdIn applies the In predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldWeeklyLimitUsd, vs...)) +} + +// WeeklyLimitUsdNotIn applies the NotIn predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldWeeklyLimitUsd, vs...)) +} + +// WeeklyLimitUsdGT applies the GT predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdGTE applies the GTE predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdLT applies the LT predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdLTE applies the LTE predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdIsNil applies the IsNil predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldWeeklyLimitUsd)) +} + +// WeeklyLimitUsdNotNil applies the NotNil predicate on the "weekly_limit_usd" field. 
+func WeeklyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldWeeklyLimitUsd)) +} + +// MonthlyLimitUsdEQ applies the EQ predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdNEQ applies the NEQ predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdIn applies the In predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldMonthlyLimitUsd, vs...)) +} + +// MonthlyLimitUsdNotIn applies the NotIn predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldMonthlyLimitUsd, vs...)) +} + +// MonthlyLimitUsdGT applies the GT predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdGTE applies the GTE predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdLT applies the LT predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdLTE applies the LTE predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdIsNil applies the IsNil predicate on the "monthly_limit_usd" field. 
+func MonthlyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldMonthlyLimitUsd)) +} + +// MonthlyLimitUsdNotNil applies the NotNil predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldMonthlyLimitUsd)) +} + +// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. +func HasAPIKeys() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAPIKeysWith applies the HasEdge predicate on the "api_keys" edge with a given conditions (other predicates). +func HasAPIKeysWith(preds ...predicate.ApiKey) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAPIKeysStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasRedeemCodes applies the HasEdge predicate on the "redeem_codes" edge. +func HasRedeemCodes() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRedeemCodesWith applies the HasEdge predicate on the "redeem_codes" edge with a given conditions (other predicates). +func HasRedeemCodesWith(preds ...predicate.RedeemCode) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newRedeemCodesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasSubscriptions applies the HasEdge predicate on the "subscriptions" edge. 
+func HasSubscriptions() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSubscriptionsWith applies the HasEdge predicate on the "subscriptions" edge with a given conditions (other predicates). +func HasSubscriptionsWith(preds ...predicate.UserSubscription) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccounts applies the HasEdge predicate on the "accounts" edge. +func HasAccounts() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AccountsTable, AccountsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountsWith applies the HasEdge predicate on the "accounts" edge with a given conditions (other predicates). +func HasAccountsWith(preds ...predicate.Account) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAccountsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAllowedUsers applies the HasEdge predicate on the "allowed_users" edge. +func HasAllowedUsers() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowedUsersTable, AllowedUsersPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowedUsersWith applies the HasEdge predicate on the "allowed_users" edge with a given conditions (other predicates). 
+func HasAllowedUsersWith(preds ...predicate.User) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAllowedUsersStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccountGroups applies the HasEdge predicate on the "account_groups" edge. +func HasAccountGroups() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountGroupsWith applies the HasEdge predicate on the "account_groups" edge with a given conditions (other predicates). +func HasAccountGroupsWith(preds ...predicate.AccountGroup) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAccountGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge. +func HasUserAllowedGroups() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserAllowedGroupsWith applies the HasEdge predicate on the "user_allowed_groups" edge with a given conditions (other predicates). +func HasUserAllowedGroupsWith(preds ...predicate.UserAllowedGroup) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newUserAllowedGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.Group) predicate.Group { + return predicate.Group(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Group) predicate.Group { + return predicate.Group(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Group) predicate.Group { + return predicate.Group(sql.NotPredicates(p)) +} diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go new file mode 100644 index 00000000..873cf84c --- /dev/null +++ b/backend/ent/group_create.go @@ -0,0 +1,1555 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// GroupCreate is the builder for creating a Group entity. +type GroupCreate struct { + config + mutation *GroupMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *GroupCreate) SetCreatedAt(v time.Time) *GroupCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *GroupCreate) SetNillableCreatedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *GroupCreate) SetUpdatedAt(v time.Time) *GroupCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
+func (_c *GroupCreate) SetNillableUpdatedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *GroupCreate) SetDeletedAt(v time.Time) *GroupCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDeletedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *GroupCreate) SetName(v string) *GroupCreate { + _c.mutation.SetName(v) + return _c +} + +// SetDescription sets the "description" field. +func (_c *GroupCreate) SetDescription(v string) *GroupCreate { + _c.mutation.SetDescription(v) + return _c +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDescription(v *string) *GroupCreate { + if v != nil { + _c.SetDescription(*v) + } + return _c +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_c *GroupCreate) SetRateMultiplier(v float64) *GroupCreate { + _c.mutation.SetRateMultiplier(v) + return _c +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_c *GroupCreate) SetNillableRateMultiplier(v *float64) *GroupCreate { + if v != nil { + _c.SetRateMultiplier(*v) + } + return _c +} + +// SetIsExclusive sets the "is_exclusive" field. +func (_c *GroupCreate) SetIsExclusive(v bool) *GroupCreate { + _c.mutation.SetIsExclusive(v) + return _c +} + +// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil. +func (_c *GroupCreate) SetNillableIsExclusive(v *bool) *GroupCreate { + if v != nil { + _c.SetIsExclusive(*v) + } + return _c +} + +// SetStatus sets the "status" field. 
+func (_c *GroupCreate) SetStatus(v string) *GroupCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *GroupCreate) SetNillableStatus(v *string) *GroupCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetPlatform sets the "platform" field. +func (_c *GroupCreate) SetPlatform(v string) *GroupCreate { + _c.mutation.SetPlatform(v) + return _c +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_c *GroupCreate) SetNillablePlatform(v *string) *GroupCreate { + if v != nil { + _c.SetPlatform(*v) + } + return _c +} + +// SetSubscriptionType sets the "subscription_type" field. +func (_c *GroupCreate) SetSubscriptionType(v string) *GroupCreate { + _c.mutation.SetSubscriptionType(v) + return _c +} + +// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil. +func (_c *GroupCreate) SetNillableSubscriptionType(v *string) *GroupCreate { + if v != nil { + _c.SetSubscriptionType(*v) + } + return _c +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (_c *GroupCreate) SetDailyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetDailyLimitUsd(v) + return _c +} + +// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDailyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetDailyLimitUsd(*v) + } + return _c +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (_c *GroupCreate) SetWeeklyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetWeeklyLimitUsd(v) + return _c +} + +// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableWeeklyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetWeeklyLimitUsd(*v) + } + return _c +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. 
+func (_c *GroupCreate) SetMonthlyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetMonthlyLimitUsd(v) + return _c +} + +// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableMonthlyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetMonthlyLimitUsd(*v) + } + return _c +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs. +func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAPIKeyIDs(ids...) + return _c +} + +// AddAPIKeys adds the "api_keys" edges to the ApiKey entity. +func (_c *GroupCreate) AddAPIKeys(v ...*ApiKey) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_c *GroupCreate) AddRedeemCodeIDs(ids ...int64) *GroupCreate { + _c.mutation.AddRedeemCodeIDs(ids...) + return _c +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_c *GroupCreate) AddRedeemCodes(v ...*RedeemCode) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_c *GroupCreate) AddSubscriptionIDs(ids ...int64) *GroupCreate { + _c.mutation.AddSubscriptionIDs(ids...) + return _c +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_c *GroupCreate) AddSubscriptions(v ...*UserSubscription) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddSubscriptionIDs(ids...) +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_c *GroupCreate) AddAccountIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAccountIDs(ids...) 
+ return _c +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_c *GroupCreate) AddAccounts(v ...*Account) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAccountIDs(ids...) +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs. +func (_c *GroupCreate) AddAllowedUserIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAllowedUserIDs(ids...) + return _c +} + +// AddAllowedUsers adds the "allowed_users" edges to the User entity. +func (_c *GroupCreate) AddAllowedUsers(v ...*User) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAllowedUserIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (_c *GroupCreate) Mutation() *GroupMutation { + return _c.mutation +} + +// Save creates the Group in the database. +func (_c *GroupCreate) Save(ctx context.Context) (*Group, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *GroupCreate) SaveX(ctx context.Context) *Group { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *GroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *GroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_c *GroupCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if group.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized group.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := group.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if group.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized group.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := group.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + v := group.DefaultRateMultiplier + _c.mutation.SetRateMultiplier(v) + } + if _, ok := _c.mutation.IsExclusive(); !ok { + v := group.DefaultIsExclusive + _c.mutation.SetIsExclusive(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := group.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.Platform(); !ok { + v := group.DefaultPlatform + _c.mutation.SetPlatform(v) + } + if _, ok := _c.mutation.SubscriptionType(); !ok { + v := group.DefaultSubscriptionType + _c.mutation.SetSubscriptionType(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *GroupCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Group.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Group.updated_at"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Group.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} + } + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + return &ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "Group.rate_multiplier"`)} + } + if _, ok := _c.mutation.IsExclusive(); !ok { + return &ValidationError{Name: "is_exclusive", err: errors.New(`ent: missing required field "Group.is_exclusive"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Group.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := group.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)} + } + } + if _, ok := _c.mutation.Platform(); !ok { + return &ValidationError{Name: "platform", err: errors.New(`ent: missing required field "Group.platform"`)} + } + if v, ok := _c.mutation.Platform(); ok { + if err := group.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)} + } + } + if _, ok := _c.mutation.SubscriptionType(); !ok { + return &ValidationError{Name: "subscription_type", err: errors.New(`ent: missing required field "Group.subscription_type"`)} + } 
+ if v, ok := _c.mutation.SubscriptionType(); ok { + if err := group.SubscriptionTypeValidator(v); err != nil { + return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)} + } + } + return nil +} + +func (_c *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { + var ( + _node = &Group{config: _c.config} + _spec = sqlgraph.NewCreateSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(group.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + _node.Description = &value + } + if value, ok := _c.mutation.RateMultiplier(); ok { + _spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value) + _node.RateMultiplier = value + } + if value, ok := _c.mutation.IsExclusive(); ok { + _spec.SetField(group.FieldIsExclusive, field.TypeBool, value) + 
_node.IsExclusive = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(group.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Platform(); ok { + _spec.SetField(group.FieldPlatform, field.TypeString, value) + _node.Platform = value + } + if value, ok := _c.mutation.SubscriptionType(); ok { + _spec.SetField(group.FieldSubscriptionType, field.TypeString, value) + _node.SubscriptionType = value + } + if value, ok := _c.mutation.DailyLimitUsd(); ok { + _spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + _node.DailyLimitUsd = &value + } + if value, ok := _c.mutation.WeeklyLimitUsd(); ok { + _spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + _node.WeeklyLimitUsd = &value + } + if value, ok := _c.mutation.MonthlyLimitUsd(); ok { + _spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + _node.MonthlyLimitUsd = &value + } + if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: 
group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _c.config, mutation: newAccountGroupMutation(_c.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AllowedUsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _c.config, mutation: newUserAllowedGroupMutation(_c.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Group.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). 
+// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.GroupUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *GroupCreate) OnConflict(opts ...sql.ConflictOption) *GroupUpsertOne { + _c.conflict = opts + return &GroupUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *GroupCreate) OnConflictColumns(columns ...string) *GroupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &GroupUpsertOne{ + create: _c, + } +} + +type ( + // GroupUpsertOne is the builder for "upsert"-ing + // one Group node. + GroupUpsertOne struct { + create *GroupCreate + } + + // GroupUpsert is the "OnConflict" setter. + GroupUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *GroupUpsert) SetUpdatedAt(v time.Time) *GroupUpsert { + u.Set(group.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *GroupUpsert) UpdateUpdatedAt() *GroupUpsert { + u.SetExcluded(group.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *GroupUpsert) SetDeletedAt(v time.Time) *GroupUpsert { + u.Set(group.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *GroupUpsert) UpdateDeletedAt() *GroupUpsert { + u.SetExcluded(group.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *GroupUpsert) ClearDeletedAt() *GroupUpsert { + u.SetNull(group.FieldDeletedAt) + return u +} + +// SetName sets the "name" field. 
+func (u *GroupUpsert) SetName(v string) *GroupUpsert { + u.Set(group.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *GroupUpsert) UpdateName() *GroupUpsert { + u.SetExcluded(group.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *GroupUpsert) SetDescription(v string) *GroupUpsert { + u.Set(group.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *GroupUpsert) UpdateDescription() *GroupUpsert { + u.SetExcluded(group.FieldDescription) + return u +} + +// ClearDescription clears the value of the "description" field. +func (u *GroupUpsert) ClearDescription() *GroupUpsert { + u.SetNull(group.FieldDescription) + return u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *GroupUpsert) SetRateMultiplier(v float64) *GroupUpsert { + u.Set(group.FieldRateMultiplier, v) + return u +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsert) UpdateRateMultiplier() *GroupUpsert { + u.SetExcluded(group.FieldRateMultiplier) + return u +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *GroupUpsert) AddRateMultiplier(v float64) *GroupUpsert { + u.Add(group.FieldRateMultiplier, v) + return u +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsert) SetIsExclusive(v bool) *GroupUpsert { + u.Set(group.FieldIsExclusive, v) + return u +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. +func (u *GroupUpsert) UpdateIsExclusive() *GroupUpsert { + u.SetExcluded(group.FieldIsExclusive) + return u +} + +// SetStatus sets the "status" field. +func (u *GroupUpsert) SetStatus(v string) *GroupUpsert { + u.Set(group.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. 
+func (u *GroupUpsert) UpdateStatus() *GroupUpsert { + u.SetExcluded(group.FieldStatus) + return u +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsert) SetPlatform(v string) *GroupUpsert { + u.Set(group.FieldPlatform, v) + return u +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsert) UpdatePlatform() *GroupUpsert { + u.SetExcluded(group.FieldPlatform) + return u +} + +// SetSubscriptionType sets the "subscription_type" field. +func (u *GroupUpsert) SetSubscriptionType(v string) *GroupUpsert { + u.Set(group.FieldSubscriptionType, v) + return u +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. +func (u *GroupUpsert) UpdateSubscriptionType() *GroupUpsert { + u.SetExcluded(group.FieldSubscriptionType) + return u +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsert) SetDailyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldDailyLimitUsd, v) + return u +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. +func (u *GroupUpsert) UpdateDailyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldDailyLimitUsd) + return u +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsert) AddDailyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldDailyLimitUsd, v) + return u +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsert) ClearDailyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldDailyLimitUsd) + return u +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsert) SetWeeklyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldWeeklyLimitUsd, v) + return u +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. 
+func (u *GroupUpsert) UpdateWeeklyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldWeeklyLimitUsd) + return u +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsert) AddWeeklyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldWeeklyLimitUsd, v) + return u +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (u *GroupUpsert) ClearWeeklyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldWeeklyLimitUsd) + return u +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (u *GroupUpsert) SetMonthlyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldMonthlyLimitUsd, v) + return u +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsert) UpdateMonthlyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldMonthlyLimitUsd) + return u +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. +func (u *GroupUpsert) AddMonthlyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldMonthlyLimitUsd, v) + return u +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (u *GroupUpsert) ClearMonthlyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldMonthlyLimitUsd) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *GroupUpsertOne) UpdateNewValues() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(group.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. 
+// Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *GroupUpsertOne) Ignore() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *GroupUpsertOne) DoNothing() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GroupCreate.OnConflict +// documentation for more info. +func (u *GroupUpsertOne) Update(set func(*GroupUpsert)) *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *GroupUpsertOne) SetUpdatedAt(v time.Time) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateUpdatedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *GroupUpsertOne) SetDeletedAt(v time.Time) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDeletedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *GroupUpsertOne) ClearDeletedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. 
+func (u *GroupUpsertOne) SetName(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateName() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *GroupUpsertOne) SetDescription(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDescription() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *GroupUpsertOne) ClearDescription() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDescription() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *GroupUpsertOne) SetRateMultiplier(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *GroupUpsertOne) AddRateMultiplier(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateRateMultiplier() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsertOne) SetIsExclusive(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetIsExclusive(v) + }) +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. 
+func (u *GroupUpsertOne) UpdateIsExclusive() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateIsExclusive() + }) +} + +// SetStatus sets the "status" field. +func (u *GroupUpsertOne) SetStatus(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateStatus() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateStatus() + }) +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsertOne) SetPlatform(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdatePlatform() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdatePlatform() + }) +} + +// SetSubscriptionType sets the "subscription_type" field. +func (u *GroupUpsertOne) SetSubscriptionType(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetSubscriptionType(v) + }) +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateSubscriptionType() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateSubscriptionType() + }) +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsertOne) SetDailyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDailyLimitUsd(v) + }) +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsertOne) AddDailyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddDailyLimitUsd(v) + }) +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. 
+func (u *GroupUpsertOne) UpdateDailyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDailyLimitUsd() + }) +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsertOne) ClearDailyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDailyLimitUsd() + }) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsertOne) SetWeeklyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetWeeklyLimitUsd(v) + }) +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsertOne) AddWeeklyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddWeeklyLimitUsd(v) + }) +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateWeeklyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateWeeklyLimitUsd() + }) +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (u *GroupUpsertOne) ClearWeeklyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearWeeklyLimitUsd() + }) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (u *GroupUpsertOne) SetMonthlyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetMonthlyLimitUsd(v) + }) +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. +func (u *GroupUpsertOne) AddMonthlyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddMonthlyLimitUsd(v) + }) +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateMonthlyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateMonthlyLimitUsd() + }) +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. 
+func (u *GroupUpsertOne) ClearMonthlyLimitUsd() *GroupUpsertOne {
+	return u.Update(func(s *GroupUpsert) {
+		s.ClearMonthlyLimitUsd()
+	})
+}
+
+// Exec executes the query.
+func (u *GroupUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for GroupCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *GroupUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *GroupUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *GroupUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// GroupCreateBulk is the builder for creating many Group entities in bulk.
+type GroupCreateBulk struct {
+	config
+	err      error
+	builders []*GroupCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the Group entities in the database.
+func (_c *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Group, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*GroupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *GroupCreateBulk) SaveX(ctx context.Context) []*Group { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *GroupCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *GroupCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.Group.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that was proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.GroupUpsert) {
+//			SetCreatedAt(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *GroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupUpsertBulk {
+	_c.conflict = opts
+	return &GroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.Group.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *GroupCreateBulk) OnConflictColumns(columns ...string) *GroupUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &GroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// GroupUpsertBulk is the builder for "upsert"-ing
+// a bulk of Group nodes.
+type GroupUpsertBulk struct {
+	create *GroupCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.Group.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+// Exec(ctx) +func (u *GroupUpsertBulk) UpdateNewValues() *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(group.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *GroupUpsertBulk) Ignore() *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *GroupUpsertBulk) DoNothing() *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GroupCreateBulk.OnConflict +// documentation for more info. +func (u *GroupUpsertBulk) Update(set func(*GroupUpsert)) *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *GroupUpsertBulk) SetUpdatedAt(v time.Time) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateUpdatedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (u *GroupUpsertBulk) SetDeletedAt(v time.Time) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDeletedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *GroupUpsertBulk) ClearDeletedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *GroupUpsertBulk) SetName(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateName() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *GroupUpsertBulk) SetDescription(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDescription() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *GroupUpsertBulk) ClearDescription() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDescription() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *GroupUpsertBulk) SetRateMultiplier(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. 
+func (u *GroupUpsertBulk) AddRateMultiplier(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateRateMultiplier() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsertBulk) SetIsExclusive(v bool) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetIsExclusive(v) + }) +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateIsExclusive() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateIsExclusive() + }) +} + +// SetStatus sets the "status" field. +func (u *GroupUpsertBulk) SetStatus(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateStatus() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateStatus() + }) +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsertBulk) SetPlatform(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdatePlatform() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdatePlatform() + }) +} + +// SetSubscriptionType sets the "subscription_type" field. +func (u *GroupUpsertBulk) SetSubscriptionType(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetSubscriptionType(v) + }) +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. 
+func (u *GroupUpsertBulk) UpdateSubscriptionType() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateSubscriptionType() + }) +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsertBulk) SetDailyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDailyLimitUsd(v) + }) +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsertBulk) AddDailyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddDailyLimitUsd(v) + }) +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDailyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDailyLimitUsd() + }) +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsertBulk) ClearDailyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDailyLimitUsd() + }) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) SetWeeklyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetWeeklyLimitUsd(v) + }) +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) AddWeeklyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddWeeklyLimitUsd(v) + }) +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateWeeklyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateWeeklyLimitUsd() + }) +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) ClearWeeklyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearWeeklyLimitUsd() + }) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. 
+func (u *GroupUpsertBulk) SetMonthlyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetMonthlyLimitUsd(v) + }) +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. +func (u *GroupUpsertBulk) AddMonthlyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddMonthlyLimitUsd(v) + }) +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateMonthlyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateMonthlyLimitUsd() + }) +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (u *GroupUpsertBulk) ClearMonthlyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearMonthlyLimitUsd() + }) +} + +// Exec executes the query. +func (u *GroupUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the GroupCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for GroupCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *GroupUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/group_delete.go b/backend/ent/group_delete.go new file mode 100644 index 00000000..6587466f --- /dev/null +++ b/backend/ent/group_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// GroupDelete is the builder for deleting a Group entity. +type GroupDelete struct { + config + hooks []Hook + mutation *GroupMutation +} + +// Where appends a list predicates to the GroupDelete builder. +func (_d *GroupDelete) Where(ps ...predicate.Group) *GroupDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *GroupDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *GroupDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *GroupDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// GroupDeleteOne is the builder for deleting a single Group entity. +type GroupDeleteOne struct { + _d *GroupDelete +} + +// Where appends a list predicates to the GroupDelete builder. +func (_d *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. 
+func (_d *GroupDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{group.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *GroupDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/group_query.go b/backend/ent/group_query.go new file mode 100644 index 00000000..0b86e069 --- /dev/null +++ b/backend/ent/group_query.go @@ -0,0 +1,1118 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// GroupQuery is the builder for querying Group entities. +type GroupQuery struct { + config + ctx *QueryContext + order []group.OrderOption + inters []Interceptor + predicates []predicate.Group + withAPIKeys *ApiKeyQuery + withRedeemCodes *RedeemCodeQuery + withSubscriptions *UserSubscriptionQuery + withAccounts *AccountQuery + withAllowedUsers *UserQuery + withAccountGroups *AccountGroupQuery + withUserAllowedGroups *UserAllowedGroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the GroupQuery builder. +func (_q *GroupQuery) Where(ps ...predicate.Group) *GroupQuery { + _q.predicates = append(_q.predicates, ps...) 
+ return _q +} + +// Limit the number of records to be returned by this query. +func (_q *GroupQuery) Limit(limit int) *GroupQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *GroupQuery) Offset(offset int) *GroupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *GroupQuery) Unique(unique bool) *GroupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *GroupQuery) Order(o ...group.OrderOption) *GroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAPIKeys chains the current query on the "api_keys" edge. +func (_q *GroupQuery) QueryAPIKeys() *ApiKeyQuery { + query := (&ApiKeyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.APIKeysTable, group.APIKeysColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryRedeemCodes chains the current query on the "redeem_codes" edge. 
+func (_q *GroupQuery) QueryRedeemCodes() *RedeemCodeQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.RedeemCodesTable, group.RedeemCodesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySubscriptions chains the current query on the "subscriptions" edge. +func (_q *GroupQuery) QuerySubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.SubscriptionsTable, group.SubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccounts chains the current query on the "accounts" edge. 
+func (_q *GroupQuery) QueryAccounts() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AccountsTable, group.AccountsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAllowedUsers chains the current query on the "allowed_users" edge. +func (_q *GroupQuery) QueryAllowedUsers() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AllowedUsersTable, group.AllowedUsersPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccountGroups chains the current query on the "account_groups" edge. 
+func (_q *GroupQuery) QueryAccountGroups() *AccountGroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(accountgroup.Table, accountgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.AccountGroupsTable, group.AccountGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge. +func (_q *GroupQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.UserAllowedGroupsTable, group.UserAllowedGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Group entity from the query. +// Returns a *NotFoundError when no Group was found. +func (_q *GroupQuery) First(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{group.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *GroupQuery) FirstX(ctx context.Context) *Group { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Group ID from the query. +// Returns a *NotFoundError when no Group ID was found. +func (_q *GroupQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{group.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *GroupQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Group entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Group entity is found. +// Returns a *NotFoundError when no Group entities are found. +func (_q *GroupQuery) Only(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{group.Label} + default: + return nil, &NotSingularError{group.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *GroupQuery) OnlyX(ctx context.Context) *Group { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Group ID in the query. +// Returns a *NotSingularError when more than one Group ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *GroupQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{group.Label} + default: + err = &NotSingularError{group.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *GroupQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Groups. +func (_q *GroupQuery) All(ctx context.Context) ([]*Group, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Group, *GroupQuery]() + return withInterceptors[[]*Group](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *GroupQuery) AllX(ctx context.Context) []*Group { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Group IDs. +func (_q *GroupQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(group.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *GroupQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *GroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*GroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *GroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *GroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *GroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the GroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *GroupQuery) Clone() *GroupQuery { + if _q == nil { + return nil + } + return &GroupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]group.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Group{}, _q.predicates...), + withAPIKeys: _q.withAPIKeys.Clone(), + withRedeemCodes: _q.withRedeemCodes.Clone(), + withSubscriptions: _q.withSubscriptions.Clone(), + withAccounts: _q.withAccounts.Clone(), + withAllowedUsers: _q.withAllowedUsers.Clone(), + withAccountGroups: _q.withAccountGroups.Clone(), + withUserAllowedGroups: _q.withUserAllowedGroups.Clone(), + // clone intermediate query. 
+ sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAPIKeys tells the query-builder to eager-load the nodes that are connected to +// the "api_keys" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAPIKeys(opts ...func(*ApiKeyQuery)) *GroupQuery { + query := (&ApiKeyClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAPIKeys = query + return _q +} + +// WithRedeemCodes tells the query-builder to eager-load the nodes that are connected to +// the "redeem_codes" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithRedeemCodes(opts ...func(*RedeemCodeQuery)) *GroupQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withRedeemCodes = query + return _q +} + +// WithSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "subscriptions" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithSubscriptions(opts ...func(*UserSubscriptionQuery)) *GroupQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withSubscriptions = query + return _q +} + +// WithAccounts tells the query-builder to eager-load the nodes that are connected to +// the "accounts" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAccounts(opts ...func(*AccountQuery)) *GroupQuery { + query := (&AccountClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccounts = query + return _q +} + +// WithAllowedUsers tells the query-builder to eager-load the nodes that are connected to +// the "allowed_users" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *GroupQuery) WithAllowedUsers(opts ...func(*UserQuery)) *GroupQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAllowedUsers = query + return _q +} + +// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to +// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *GroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccountGroups = query + return _q +} + +// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to +// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *GroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUserAllowedGroups = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Group.Query(). +// GroupBy(group.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &GroupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = group.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Group.Query(). +// Select(group.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *GroupQuery) Select(fields ...string) *GroupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &GroupSelect{GroupQuery: _q} + sbuild.label = group.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a GroupSelect configured with the given aggregations. +func (_q *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *GroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !group.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) { + var ( + nodes = []*Group{} + _spec = _q.querySpec() + loadedTypes = [7]bool{ + _q.withAPIKeys != nil, + _q.withRedeemCodes != nil, + _q.withSubscriptions != nil, + _q.withAccounts != nil, + _q.withAllowedUsers != nil, + _q.withAccountGroups != nil, + _q.withUserAllowedGroups != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Group).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Group{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, 
_spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAPIKeys; query != nil { + if err := _q.loadAPIKeys(ctx, query, nodes, + func(n *Group) { n.Edges.APIKeys = []*ApiKey{} }, + func(n *Group, e *ApiKey) { n.Edges.APIKeys = append(n.Edges.APIKeys, e) }); err != nil { + return nil, err + } + } + if query := _q.withRedeemCodes; query != nil { + if err := _q.loadRedeemCodes(ctx, query, nodes, + func(n *Group) { n.Edges.RedeemCodes = []*RedeemCode{} }, + func(n *Group, e *RedeemCode) { n.Edges.RedeemCodes = append(n.Edges.RedeemCodes, e) }); err != nil { + return nil, err + } + } + if query := _q.withSubscriptions; query != nil { + if err := _q.loadSubscriptions(ctx, query, nodes, + func(n *Group) { n.Edges.Subscriptions = []*UserSubscription{} }, + func(n *Group, e *UserSubscription) { n.Edges.Subscriptions = append(n.Edges.Subscriptions, e) }); err != nil { + return nil, err + } + } + if query := _q.withAccounts; query != nil { + if err := _q.loadAccounts(ctx, query, nodes, + func(n *Group) { n.Edges.Accounts = []*Account{} }, + func(n *Group, e *Account) { n.Edges.Accounts = append(n.Edges.Accounts, e) }); err != nil { + return nil, err + } + } + if query := _q.withAllowedUsers; query != nil { + if err := _q.loadAllowedUsers(ctx, query, nodes, + func(n *Group) { n.Edges.AllowedUsers = []*User{} }, + func(n *Group, e *User) { n.Edges.AllowedUsers = append(n.Edges.AllowedUsers, e) }); err != nil { + return nil, err + } + } + if query := _q.withAccountGroups; query != nil { + if err := _q.loadAccountGroups(ctx, query, nodes, + func(n *Group) { n.Edges.AccountGroups = []*AccountGroup{} }, + func(n *Group, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil { + return nil, err + } + } + if query := _q.withUserAllowedGroups; query != nil { + if err := _q.loadUserAllowedGroups(ctx, query, nodes, + func(n *Group) { 
n.Edges.UserAllowedGroups = []*UserAllowedGroup{} }, + func(n *Group, e *UserAllowedGroup) { n.Edges.UserAllowedGroups = append(n.Edges.UserAllowedGroups, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *GroupQuery) loadAPIKeys(ctx context.Context, query *ApiKeyQuery, nodes []*Group, init func(*Group), assign func(*Group, *ApiKey)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(apikey.FieldGroupID) + } + query.Where(predicate.ApiKey(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.APIKeysColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + if fk == nil { + return fmt.Errorf(`foreign-key "group_id" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *GroupQuery) loadRedeemCodes(ctx context.Context, query *RedeemCodeQuery, nodes []*Group, init func(*Group), assign func(*Group, *RedeemCode)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(redeemcode.FieldGroupID) + } + query.Where(predicate.RedeemCode(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.RedeemCodesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + if fk == nil { + return fmt.Errorf(`foreign-key "group_id" is nil for node %v`, n.ID) + } 
+ node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *GroupQuery) loadSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*Group, init func(*Group), assign func(*Group, *UserSubscription)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usersubscription.FieldGroupID) + } + query.Where(predicate.UserSubscription(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.SubscriptionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *GroupQuery) loadAccounts(ctx context.Context, query *AccountQuery, nodes []*Group, init func(*Group), assign func(*Group, *Account)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*Group) + nids := make(map[int64]map[*Group]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(group.AccountsTable) + s.Join(joinT).On(s.C(account.FieldID), joinT.C(group.AccountsPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(group.AccountsPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(group.AccountsPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Group]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Account](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "accounts" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *GroupQuery) loadAllowedUsers(ctx context.Context, query *UserQuery, nodes []*Group, init func(*Group), assign func(*Group, *User)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*Group) + nids := make(map[int64]map[*Group]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(group.AllowedUsersTable) + s.Join(joinT).On(s.C(user.FieldID), joinT.C(group.AllowedUsersPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(group.AllowedUsersPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(group.AllowedUsersPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Group]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*User](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowed_users" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *GroupQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Group, init func(*Group), assign func(*Group, *AccountGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(accountgroup.FieldGroupID) + } + query.Where(predicate.AccountGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.AccountGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key 
"group_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} +func (_q *GroupQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*Group, init func(*Group), assign func(*Group, *UserAllowedGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userallowedgroup.FieldGroupID) + } + query.Where(predicate.UserAllowedGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.UserAllowedGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} + +func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *GroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) + for i := range fields { + if fields[i] != group.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector 
*sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(group.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = group.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// GroupGroupBy is the group-by builder for Group entities. +type GroupGroupBy struct { + selector + build *GroupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *GroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// GroupSelect is the builder for selecting fields of Group entities. +type GroupSelect struct { + *GroupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *GroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, _s.GroupQuery, _s, _s.inters, v) +} + +func (_s *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go new file mode 100644 index 00000000..0ed1e3fd --- /dev/null +++ b/backend/ent/group_update.go @@ -0,0 +1,1687 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// GroupUpdate is the builder for updating Group entities. +type GroupUpdate struct { + config + hooks []Hook + mutation *GroupMutation +} + +// Where appends a list predicates to the GroupUpdate builder. +func (_u *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *GroupUpdate) SetUpdatedAt(v time.Time) *GroupUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *GroupUpdate) SetDeletedAt(v time.Time) *GroupUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDeletedAt(v *time.Time) *GroupUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *GroupUpdate) ClearDeletedAt() *GroupUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *GroupUpdate) SetName(v string) *GroupUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableName(v *string) *GroupUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *GroupUpdate) SetDescription(v string) *GroupUpdate { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDescription(v *string) *GroupUpdate { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// ClearDescription clears the value of the "description" field. +func (_u *GroupUpdate) ClearDescription() *GroupUpdate { + _u.mutation.ClearDescription() + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *GroupUpdate) SetRateMultiplier(v float64) *GroupUpdate { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. 
+func (_u *GroupUpdate) SetNillableRateMultiplier(v *float64) *GroupUpdate { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *GroupUpdate) AddRateMultiplier(v float64) *GroupUpdate { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetIsExclusive sets the "is_exclusive" field. +func (_u *GroupUpdate) SetIsExclusive(v bool) *GroupUpdate { + _u.mutation.SetIsExclusive(v) + return _u +} + +// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableIsExclusive(v *bool) *GroupUpdate { + if v != nil { + _u.SetIsExclusive(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *GroupUpdate) SetStatus(v string) *GroupUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableStatus(v *string) *GroupUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *GroupUpdate) SetPlatform(v string) *GroupUpdate { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *GroupUpdate) SetNillablePlatform(v *string) *GroupUpdate { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetSubscriptionType sets the "subscription_type" field. +func (_u *GroupUpdate) SetSubscriptionType(v string) *GroupUpdate { + _u.mutation.SetSubscriptionType(v) + return _u +} + +// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableSubscriptionType(v *string) *GroupUpdate { + if v != nil { + _u.SetSubscriptionType(*v) + } + return _u +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. 
+func (_u *GroupUpdate) SetDailyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetDailyLimitUsd() + _u.mutation.SetDailyLimitUsd(v) + return _u +} + +// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDailyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetDailyLimitUsd(*v) + } + return _u +} + +// AddDailyLimitUsd adds value to the "daily_limit_usd" field. +func (_u *GroupUpdate) AddDailyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddDailyLimitUsd(v) + return _u +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (_u *GroupUpdate) ClearDailyLimitUsd() *GroupUpdate { + _u.mutation.ClearDailyLimitUsd() + return _u +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (_u *GroupUpdate) SetWeeklyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetWeeklyLimitUsd() + _u.mutation.SetWeeklyLimitUsd(v) + return _u +} + +// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableWeeklyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetWeeklyLimitUsd(*v) + } + return _u +} + +// AddWeeklyLimitUsd adds value to the "weekly_limit_usd" field. +func (_u *GroupUpdate) AddWeeklyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddWeeklyLimitUsd(v) + return _u +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (_u *GroupUpdate) ClearWeeklyLimitUsd() *GroupUpdate { + _u.mutation.ClearWeeklyLimitUsd() + return _u +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (_u *GroupUpdate) SetMonthlyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetMonthlyLimitUsd() + _u.mutation.SetMonthlyLimitUsd(v) + return _u +} + +// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil. 
+func (_u *GroupUpdate) SetNillableMonthlyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetMonthlyLimitUsd(*v) + } + return _u +} + +// AddMonthlyLimitUsd adds value to the "monthly_limit_usd" field. +func (_u *GroupUpdate) AddMonthlyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddMonthlyLimitUsd(v) + return _u +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (_u *GroupUpdate) ClearMonthlyLimitUsd() *GroupUpdate { + _u.mutation.ClearMonthlyLimitUsd() + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs. +func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the ApiKey entity. +func (_u *GroupUpdate) AddAPIKeys(v ...*ApiKey) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *GroupUpdate) AddRedeemCodeIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *GroupUpdate) AddRedeemCodes(v ...*RedeemCode) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *GroupUpdate) AddSubscriptionIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddSubscriptionIDs(ids...) + return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *GroupUpdate) AddSubscriptions(v ...*UserSubscription) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) 
+} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_u *GroupUpdate) AddAccountIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAccountIDs(ids...) + return _u +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_u *GroupUpdate) AddAccounts(v ...*Account) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAccountIDs(ids...) +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs. +func (_u *GroupUpdate) AddAllowedUserIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAllowedUserIDs(ids...) + return _u +} + +// AddAllowedUsers adds the "allowed_users" edges to the User entity. +func (_u *GroupUpdate) AddAllowedUsers(v ...*User) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedUserIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (_u *GroupUpdate) Mutation() *GroupMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the ApiKey entity. +func (_u *GroupUpdate) ClearAPIKeys() *GroupUpdate { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to ApiKey entities by IDs. +func (_u *GroupUpdate) RemoveAPIKeyIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to ApiKey entities. +func (_u *GroupUpdate) RemoveAPIKeys(v ...*ApiKey) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *GroupUpdate) ClearRedeemCodes() *GroupUpdate { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. 
+func (_u *GroupUpdate) RemoveRedeemCodeIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. +func (_u *GroupUpdate) RemoveRedeemCodes(v ...*RedeemCode) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *GroupUpdate) ClearSubscriptions() *GroupUpdate { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *GroupUpdate) RemoveSubscriptionIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *GroupUpdate) RemoveSubscriptions(v ...*UserSubscription) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) +} + +// ClearAccounts clears all "accounts" edges to the Account entity. +func (_u *GroupUpdate) ClearAccounts() *GroupUpdate { + _u.mutation.ClearAccounts() + return _u +} + +// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs. +func (_u *GroupUpdate) RemoveAccountIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAccountIDs(ids...) + return _u +} + +// RemoveAccounts removes "accounts" edges to Account entities. +func (_u *GroupUpdate) RemoveAccounts(v ...*Account) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAccountIDs(ids...) +} + +// ClearAllowedUsers clears all "allowed_users" edges to the User entity. 
+func (_u *GroupUpdate) ClearAllowedUsers() *GroupUpdate { + _u.mutation.ClearAllowedUsers() + return _u +} + +// RemoveAllowedUserIDs removes the "allowed_users" edge to User entities by IDs. +func (_u *GroupUpdate) RemoveAllowedUserIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAllowedUserIDs(ids...) + return _u +} + +// RemoveAllowedUsers removes "allowed_users" edges to User entities. +func (_u *GroupUpdate) RemoveAllowedUsers(v ...*User) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedUserIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *GroupUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *GroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *GroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *GroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *GroupUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if group.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := group.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *GroupUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := group.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := group.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)} + } + } + if v, ok := _u.mutation.SubscriptionType(); ok { + if err := group.SubscriptionTypeValidator(v); err != nil { + return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)} + } + } + return nil +} + +func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(group.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + } + if _u.mutation.DescriptionCleared() { + 
_spec.ClearField(group.FieldDescription, field.TypeString) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.IsExclusive(); ok { + _spec.SetField(group.FieldIsExclusive, field.TypeBool, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(group.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(group.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.SubscriptionType(); ok { + _spec.SetField(group.FieldSubscriptionType, field.TypeString, value) + } + if value, ok := _u.mutation.DailyLimitUsd(); ok { + _spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyLimitUsd(); ok { + _spec.AddField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.DailyLimitUsdCleared() { + _spec.ClearField(group.FieldDailyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.WeeklyLimitUsd(); ok { + _spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyLimitUsd(); ok { + _spec.AddField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.WeeklyLimitUsdCleared() { + _spec.ClearField(group.FieldWeeklyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.MonthlyLimitUsd(); ok { + _spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyLimitUsd(); ok { + _spec.AddField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.MonthlyLimitUsdCleared() { + _spec.ClearField(group.FieldMonthlyLimitUsd, field.TypeFloat64) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + 
Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes 
{ + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if 
_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + 
Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedUsersIDs(); len(nodes) > 0 && !_u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedUsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = 
&NotFoundError{group.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}

// GroupUpdateOne is the builder for updating a single Group entity.
// NOTE(review): generated by ent — keep in sync with the generator, do not hand-edit logic.
type GroupUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *GroupMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (_u *GroupUpdateOne) SetUpdatedAt(v time.Time) *GroupUpdateOne {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetDeletedAt sets the "deleted_at" field.
func (_u *GroupUpdateOne) SetDeletedAt(v time.Time) *GroupUpdateOne {
	_u.mutation.SetDeletedAt(v)
	return _u
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableDeletedAt(v *time.Time) *GroupUpdateOne {
	if v != nil {
		_u.SetDeletedAt(*v)
	}
	return _u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (_u *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne {
	_u.mutation.ClearDeletedAt()
	return _u
}

// SetName sets the "name" field.
func (_u *GroupUpdateOne) SetName(v string) *GroupUpdateOne {
	_u.mutation.SetName(v)
	return _u
}

// SetNillableName sets the "name" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableName(v *string) *GroupUpdateOne {
	if v != nil {
		_u.SetName(*v)
	}
	return _u
}

// SetDescription sets the "description" field.
func (_u *GroupUpdateOne) SetDescription(v string) *GroupUpdateOne {
	_u.mutation.SetDescription(v)
	return _u
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableDescription(v *string) *GroupUpdateOne {
	if v != nil {
		_u.SetDescription(*v)
	}
	return _u
}

// ClearDescription clears the value of the "description" field.
func (_u *GroupUpdateOne) ClearDescription() *GroupUpdateOne {
	_u.mutation.ClearDescription()
	return _u
}

// SetRateMultiplier sets the "rate_multiplier" field.
func (_u *GroupUpdateOne) SetRateMultiplier(v float64) *GroupUpdateOne {
	// Reset discards any pending Add on this numeric field before the absolute Set.
	_u.mutation.ResetRateMultiplier()
	_u.mutation.SetRateMultiplier(v)
	return _u
}

// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableRateMultiplier(v *float64) *GroupUpdateOne {
	if v != nil {
		_u.SetRateMultiplier(*v)
	}
	return _u
}

// AddRateMultiplier adds value to the "rate_multiplier" field.
func (_u *GroupUpdateOne) AddRateMultiplier(v float64) *GroupUpdateOne {
	_u.mutation.AddRateMultiplier(v)
	return _u
}

// SetIsExclusive sets the "is_exclusive" field.
func (_u *GroupUpdateOne) SetIsExclusive(v bool) *GroupUpdateOne {
	_u.mutation.SetIsExclusive(v)
	return _u
}

// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableIsExclusive(v *bool) *GroupUpdateOne {
	if v != nil {
		_u.SetIsExclusive(*v)
	}
	return _u
}

// SetStatus sets the "status" field.
func (_u *GroupUpdateOne) SetStatus(v string) *GroupUpdateOne {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableStatus(v *string) *GroupUpdateOne {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// SetPlatform sets the "platform" field.
func (_u *GroupUpdateOne) SetPlatform(v string) *GroupUpdateOne {
	_u.mutation.SetPlatform(v)
	return _u
}

// SetNillablePlatform sets the "platform" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillablePlatform(v *string) *GroupUpdateOne {
	if v != nil {
		_u.SetPlatform(*v)
	}
	return _u
}

// SetSubscriptionType sets the "subscription_type" field.
func (_u *GroupUpdateOne) SetSubscriptionType(v string) *GroupUpdateOne {
	_u.mutation.SetSubscriptionType(v)
	return _u
}

// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableSubscriptionType(v *string) *GroupUpdateOne {
	if v != nil {
		_u.SetSubscriptionType(*v)
	}
	return _u
}

// SetDailyLimitUsd sets the "daily_limit_usd" field.
func (_u *GroupUpdateOne) SetDailyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.ResetDailyLimitUsd()
	_u.mutation.SetDailyLimitUsd(v)
	return _u
}

// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableDailyLimitUsd(v *float64) *GroupUpdateOne {
	if v != nil {
		_u.SetDailyLimitUsd(*v)
	}
	return _u
}

// AddDailyLimitUsd adds value to the "daily_limit_usd" field.
func (_u *GroupUpdateOne) AddDailyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.AddDailyLimitUsd(v)
	return _u
}

// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field.
func (_u *GroupUpdateOne) ClearDailyLimitUsd() *GroupUpdateOne {
	_u.mutation.ClearDailyLimitUsd()
	return _u
}

// SetWeeklyLimitUsd sets the "weekly_limit_usd" field.
func (_u *GroupUpdateOne) SetWeeklyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.ResetWeeklyLimitUsd()
	_u.mutation.SetWeeklyLimitUsd(v)
	return _u
}

// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableWeeklyLimitUsd(v *float64) *GroupUpdateOne {
	if v != nil {
		_u.SetWeeklyLimitUsd(*v)
	}
	return _u
}

// AddWeeklyLimitUsd adds value to the "weekly_limit_usd" field.
func (_u *GroupUpdateOne) AddWeeklyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.AddWeeklyLimitUsd(v)
	return _u
}

// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field.
func (_u *GroupUpdateOne) ClearWeeklyLimitUsd() *GroupUpdateOne {
	_u.mutation.ClearWeeklyLimitUsd()
	return _u
}

// SetMonthlyLimitUsd sets the "monthly_limit_usd" field.
func (_u *GroupUpdateOne) SetMonthlyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.ResetMonthlyLimitUsd()
	_u.mutation.SetMonthlyLimitUsd(v)
	return _u
}

// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil.
func (_u *GroupUpdateOne) SetNillableMonthlyLimitUsd(v *float64) *GroupUpdateOne {
	if v != nil {
		_u.SetMonthlyLimitUsd(*v)
	}
	return _u
}

// AddMonthlyLimitUsd adds value to the "monthly_limit_usd" field.
func (_u *GroupUpdateOne) AddMonthlyLimitUsd(v float64) *GroupUpdateOne {
	_u.mutation.AddMonthlyLimitUsd(v)
	return _u
}

// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field.
func (_u *GroupUpdateOne) ClearMonthlyLimitUsd() *GroupUpdateOne {
	_u.mutation.ClearMonthlyLimitUsd()
	return _u
}

// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs.
func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.AddAPIKeyIDs(ids...)
	return _u
}

// AddAPIKeys adds the "api_keys" edges to the ApiKey entity.
func (_u *GroupUpdateOne) AddAPIKeys(v ...*ApiKey) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAPIKeyIDs(ids...)
}

// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs.
func (_u *GroupUpdateOne) AddRedeemCodeIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.AddRedeemCodeIDs(ids...)
	return _u
}

// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity.
func (_u *GroupUpdateOne) AddRedeemCodes(v ...*RedeemCode) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddRedeemCodeIDs(ids...)
}

// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs.
func (_u *GroupUpdateOne) AddSubscriptionIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.AddSubscriptionIDs(ids...)
	return _u
}

// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity.
func (_u *GroupUpdateOne) AddSubscriptions(v ...*UserSubscription) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddSubscriptionIDs(ids...)
}

// AddAccountIDs adds the "accounts" edge to the Account entity by IDs.
func (_u *GroupUpdateOne) AddAccountIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.AddAccountIDs(ids...)
	return _u
}

// AddAccounts adds the "accounts" edges to the Account entity.
func (_u *GroupUpdateOne) AddAccounts(v ...*Account) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAccountIDs(ids...)
}

// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs.
func (_u *GroupUpdateOne) AddAllowedUserIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.AddAllowedUserIDs(ids...)
	return _u
}

// AddAllowedUsers adds the "allowed_users" edges to the User entity.
func (_u *GroupUpdateOne) AddAllowedUsers(v ...*User) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAllowedUserIDs(ids...)
}

// Mutation returns the GroupMutation object of the builder.
func (_u *GroupUpdateOne) Mutation() *GroupMutation {
	return _u.mutation
}

// ClearAPIKeys clears all "api_keys" edges to the ApiKey entity.
func (_u *GroupUpdateOne) ClearAPIKeys() *GroupUpdateOne {
	_u.mutation.ClearAPIKeys()
	return _u
}

// RemoveAPIKeyIDs removes the "api_keys" edge to ApiKey entities by IDs.
func (_u *GroupUpdateOne) RemoveAPIKeyIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.RemoveAPIKeyIDs(ids...)
	return _u
}

// RemoveAPIKeys removes "api_keys" edges to ApiKey entities.
func (_u *GroupUpdateOne) RemoveAPIKeys(v ...*ApiKey) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAPIKeyIDs(ids...)
}

// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity.
func (_u *GroupUpdateOne) ClearRedeemCodes() *GroupUpdateOne {
	_u.mutation.ClearRedeemCodes()
	return _u
}

// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs.
func (_u *GroupUpdateOne) RemoveRedeemCodeIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.RemoveRedeemCodeIDs(ids...)
	return _u
}

// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities.
func (_u *GroupUpdateOne) RemoveRedeemCodes(v ...*RedeemCode) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveRedeemCodeIDs(ids...)
}

// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity.
func (_u *GroupUpdateOne) ClearSubscriptions() *GroupUpdateOne {
	_u.mutation.ClearSubscriptions()
	return _u
}

// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs.
func (_u *GroupUpdateOne) RemoveSubscriptionIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.RemoveSubscriptionIDs(ids...)
	return _u
}

// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities.
func (_u *GroupUpdateOne) RemoveSubscriptions(v ...*UserSubscription) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveSubscriptionIDs(ids...)
}

// ClearAccounts clears all "accounts" edges to the Account entity.
func (_u *GroupUpdateOne) ClearAccounts() *GroupUpdateOne {
	_u.mutation.ClearAccounts()
	return _u
}

// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs.
func (_u *GroupUpdateOne) RemoveAccountIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.RemoveAccountIDs(ids...)
	return _u
}

// RemoveAccounts removes "accounts" edges to Account entities.
func (_u *GroupUpdateOne) RemoveAccounts(v ...*Account) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAccountIDs(ids...)
}

// ClearAllowedUsers clears all "allowed_users" edges to the User entity.
func (_u *GroupUpdateOne) ClearAllowedUsers() *GroupUpdateOne {
	_u.mutation.ClearAllowedUsers()
	return _u
}

// RemoveAllowedUserIDs removes the "allowed_users" edge to User entities by IDs.
func (_u *GroupUpdateOne) RemoveAllowedUserIDs(ids ...int64) *GroupUpdateOne {
	_u.mutation.RemoveAllowedUserIDs(ids...)
	return _u
}

// RemoveAllowedUsers removes "allowed_users" edges to User entities.
func (_u *GroupUpdateOne) RemoveAllowedUsers(v ...*User) *GroupUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAllowedUserIDs(ids...)
}

// Where appends a list predicates to the GroupUpdateOne builder.
func (_u *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}

// Save executes the query and returns the updated Group entity.
func (_u *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
	if err := _u.defaults(); err != nil {
		return nil, err
	}
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *GroupUpdateOne) SaveX(ctx context.Context) *Group {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *GroupUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *GroupUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (_u *GroupUpdateOne) defaults() error {
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		if group.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := group.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
	return nil
}

// check runs all checks and user-defined validators on the builder.
func (_u *GroupUpdateOne) check() error {
	if v, ok := _u.mutation.Name(); ok {
		if err := group.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := group.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Platform(); ok {
		if err := group.PlatformValidator(v); err != nil {
			return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)}
		}
	}
	if v, ok := _u.mutation.SubscriptionType(); ok {
		if err := group.SubscriptionTypeValidator(v); err != nil {
			return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)}
		}
	}
	return nil
}

func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64))
	// UpdateOne requires the target row ID to be present on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Group.id" for update`)}
	}
	_spec.Node.ID.Value = id
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
		for _, f := range fields {
			if !group.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != group.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field mutations (set / add / clear).
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(group.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.DeletedAt(); ok {
		_spec.SetField(group.FieldDeletedAt, field.TypeTime, value)
	}
	if _u.mutation.DeletedAtCleared() {
		_spec.ClearField(group.FieldDeletedAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Name(); ok {
		_spec.SetField(group.FieldName, field.TypeString, value)
	}
	if value, ok := _u.mutation.Description(); ok {
		_spec.SetField(group.FieldDescription, field.TypeString, value)
	}
	if _u.mutation.DescriptionCleared() {
		_spec.ClearField(group.FieldDescription, field.TypeString)
	}
	if value, ok := _u.mutation.RateMultiplier(); ok {
		_spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedRateMultiplier(); ok {
		_spec.AddField(group.FieldRateMultiplier, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.IsExclusive(); ok {
		_spec.SetField(group.FieldIsExclusive, field.TypeBool, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(group.FieldStatus, field.TypeString, value)
	}
	if value, ok := _u.mutation.Platform(); ok {
		_spec.SetField(group.FieldPlatform, field.TypeString, value)
	}
	if value, ok := _u.mutation.SubscriptionType(); ok {
		_spec.SetField(group.FieldSubscriptionType, field.TypeString, value)
	}
	if value, ok := _u.mutation.DailyLimitUsd(); ok {
		_spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedDailyLimitUsd(); ok {
		_spec.AddField(group.FieldDailyLimitUsd, field.TypeFloat64, value)
	}
	if _u.mutation.DailyLimitUsdCleared() {
		_spec.ClearField(group.FieldDailyLimitUsd, field.TypeFloat64)
	}
	if value, ok := _u.mutation.WeeklyLimitUsd(); ok {
		_spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedWeeklyLimitUsd(); ok {
		_spec.AddField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value)
	}
	if _u.mutation.WeeklyLimitUsdCleared() {
		_spec.ClearField(group.FieldWeeklyLimitUsd, field.TypeFloat64)
	}
	if value, ok := _u.mutation.MonthlyLimitUsd(); ok {
		_spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedMonthlyLimitUsd(); ok {
		_spec.AddField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value)
	}
	if _u.mutation.MonthlyLimitUsdCleared() {
		_spec.ClearField(group.FieldMonthlyLimitUsd, field.TypeFloat64)
	}
	// O2M edge mutations: api_keys (clear / remove / add, in that order).
	if _u.mutation.APIKeysCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.APIKeysTable,
			Columns: []string{group.APIKeysColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.APIKeysTable,
			Columns: []string{group.APIKeysColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.APIKeysTable,
			Columns: []string{group.APIKeysColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// O2M edge mutations: redeem_codes.
	if _u.mutation.RedeemCodesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.RedeemCodesTable,
			Columns: []string{group.RedeemCodesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.RedeemCodesTable,
			Columns: []string{group.RedeemCodesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.RedeemCodesTable,
			Columns: []string{group.RedeemCodesColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// O2M edge mutations: subscriptions.
	if _u.mutation.SubscriptionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.SubscriptionsTable,
			Columns: []string{group.SubscriptionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.SubscriptionsTable,
			Columns: []string{group.SubscriptionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   group.SubscriptionsTable,
			Columns: []string{group.SubscriptionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// M2M edge mutations: accounts (join-entity defaults come from AccountGroupCreate).
	if _u.mutation.AccountsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AccountsTable,
			Columns: group.AccountsPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AccountsTable,
			Columns: group.AccountsPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AccountsTable,
			Columns: group.AccountsPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// M2M edge mutations: allowed_users (join-entity defaults come from UserAllowedGroupCreate).
	if _u.mutation.AllowedUsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AllowedUsersTable,
			Columns: group.AllowedUsersPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAllowedUsersIDs(); len(nodes) > 0 && !_u.mutation.AllowedUsersCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AllowedUsersTable,
			Columns: group.AllowedUsersPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AllowedUsersIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: true,
			Table:   group.AllowedUsersTable,
			Columns: group.AllowedUsersPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)}
		createE.defaults()
		_, specE := createE.createSpec()
		edge.Target.Fields = specE.Fields
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	_node = &Group{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{group.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go
new file mode 100644
index 00000000..46933bb0
--- /dev/null
+++ b/backend/ent/hook/hook.go
@@ -0,0 +1,307 @@
// Code generated by ent, DO NOT EDIT.

// Package hook wraps plain functions as ent mutation hooks, one adapter
// type per schema entity, plus boolean Condition combinators.
package hook

import (
	"context"
	"fmt"

	"github.com/Wei-Shaw/sub2api/ent"
)

// The AccountFunc type is an adapter to allow the use of ordinary
// function as Account mutator.
type AccountFunc func(context.Context, *ent.AccountMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f AccountFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.AccountMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountMutation", m)
}

// The AccountGroupFunc type is an adapter to allow the use of ordinary
// function as AccountGroup mutator.
type AccountGroupFunc func(context.Context, *ent.AccountGroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f AccountGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.AccountGroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountGroupMutation", m)
}

// The ApiKeyFunc type is an adapter to allow the use of ordinary
// function as ApiKey mutator.
type ApiKeyFunc func(context.Context, *ent.ApiKeyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f ApiKeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.ApiKeyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ApiKeyMutation", m)
}

// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.GroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
}

// The ProxyFunc type is an adapter to allow the use of ordinary
// function as Proxy mutator.
type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f ProxyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.ProxyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProxyMutation", m)
}

// The RedeemCodeFunc type is an adapter to allow the use of ordinary
// function as RedeemCode mutator.
type RedeemCodeFunc func(context.Context, *ent.RedeemCodeMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f RedeemCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.RedeemCodeMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RedeemCodeMutation", m)
}

// The SettingFunc type is an adapter to allow the use of ordinary
// function as Setting mutator.
type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.SettingMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m)
}

// The UserFunc type is an adapter to allow the use of ordinary
// function as User mutator.
type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
}

// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary
// function as UserAllowedGroup mutator.
type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserAllowedGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserAllowedGroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAllowedGroupMutation", m)
}

// The UserSubscriptionFunc type is an adapter to allow the use of ordinary
// function as UserSubscription mutator.
type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserSubscriptionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserSubscriptionMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserSubscriptionMutation", m)
}

// Condition is a hook condition function.
type Condition func(context.Context, ent.Mutation) bool

// And groups conditions with the AND operator.
func And(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		// Short-circuits on the first false condition.
		if !first(ctx, m) || !second(ctx, m) {
			return false
		}
		for _, cond := range rest {
			if !cond(ctx, m) {
				return false
			}
		}
		return true
	}
}

// Or groups conditions with the OR operator.
func Or(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		// Short-circuits on the first true condition.
		if first(ctx, m) || second(ctx, m) {
			return true
		}
		for _, cond := range rest {
			if cond(ctx, m) {
				return true
			}
		}
		return false
	}
}

// Not negates a given condition.
func Not(cond Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		return !cond(ctx, m)
	}
}

// HasOp is a condition testing mutation operation.
+func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. 
+// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) 
+} diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go new file mode 100644 index 00000000..ab5f5554 --- /dev/null +++ b/backend/ent/intercept/intercept.go @@ -0,0 +1,419 @@ +// Code generated by ent, DO NOT EDIT. + +package intercept + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// The Query interface represents an operation that queries a graph. +// By using this interface, users can write generic code that manipulates +// query builders of different types. +type Query interface { + // Type returns the string representation of the query type. + Type() string + // Limit the number of records to be returned by this query. + Limit(int) + // Offset to start from. + Offset(int) + // Unique configures the query builder to filter duplicate records. + Unique(bool) + // Order specifies how the records should be ordered. + Order(...func(*sql.Selector)) + // WhereP appends storage-level predicates to the query builder. Using this method, users + // can use type-assertion to append predicates that do not depend on any generated package. + WhereP(...func(*sql.Selector)) +} + +// The Func type is an adapter that allows ordinary functions to be used as interceptors. +// Unlike traversal functions, interceptors are skipped during graph traversals. Note that the +// implementation of Func is different from the one defined in entgo.io/ent.InterceptFunc. 
+type Func func(context.Context, Query) error + +// Intercept calls f(ctx, q) and then applied the next Querier. +func (f Func) Intercept(next ent.Querier) ent.Querier { + return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) { + query, err := NewQuery(q) + if err != nil { + return nil, err + } + if err := f(ctx, query); err != nil { + return nil, err + } + return next.Query(ctx, q) + }) +} + +// The TraverseFunc type is an adapter to allow the use of ordinary function as Traverser. +// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f. +type TraverseFunc func(context.Context, Query) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseFunc) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseFunc) Traverse(ctx context.Context, q ent.Query) error { + query, err := NewQuery(q) + if err != nil { + return err + } + return f(ctx, query) +} + +// The AccountFunc type is an adapter to allow the use of ordinary function as a Querier. +type AccountFunc func(context.Context, *ent.AccountQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AccountFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AccountQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountQuery", q) +} + +// The TraverseAccount type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAccount func(context.Context, *ent.AccountQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAccount) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). 
+func (f TraverseAccount) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AccountQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AccountQuery", q) +} + +// The AccountGroupFunc type is an adapter to allow the use of ordinary function as a Querier. +type AccountGroupFunc func(context.Context, *ent.AccountGroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AccountGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AccountGroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) +} + +// The TraverseAccountGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAccountGroup func(context.Context, *ent.AccountGroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAccountGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAccountGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AccountGroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) +} + +// The ApiKeyFunc type is an adapter to allow the use of ordinary function as a Querier. +type ApiKeyFunc func(context.Context, *ent.ApiKeyQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ApiKeyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ApiKeyQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.ApiKeyQuery", q) +} + +// The TraverseApiKey type is an adapter to allow the use of ordinary function as Traverser. 
+type TraverseApiKey func(context.Context, *ent.ApiKeyQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseApiKey) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseApiKey) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ApiKeyQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ApiKeyQuery", q) +} + +// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier. +type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f GroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.GroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) +} + +// The TraverseGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseGroup func(context.Context, *ent.GroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.GroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) +} + +// The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier. +type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ProxyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ProxyQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. 
expect *ent.ProxyQuery", q) +} + +// The TraverseProxy type is an adapter to allow the use of ordinary function as Traverser. +type TraverseProxy func(context.Context, *ent.ProxyQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseProxy) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseProxy) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ProxyQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ProxyQuery", q) +} + +// The RedeemCodeFunc type is an adapter to allow the use of ordinary function as a Querier. +type RedeemCodeFunc func(context.Context, *ent.RedeemCodeQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f RedeemCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.RedeemCodeQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q) +} + +// The TraverseRedeemCode type is an adapter to allow the use of ordinary function as Traverser. +type TraverseRedeemCode func(context.Context, *ent.RedeemCodeQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseRedeemCode) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseRedeemCode) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.RedeemCodeQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q) +} + +// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier. +type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error) + +// Query calls f(ctx, q). 
+func (f SettingFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.SettingQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) +} + +// The TraverseSetting type is an adapter to allow the use of ordinary function as Traverser. +type TraverseSetting func(context.Context, *ent.SettingQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseSetting) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.SettingQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) +} + +// The UserFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserFunc func(context.Context, *ent.UserQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q) +} + +// The TraverseUser type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUser func(context.Context, *ent.UserQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUser) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUser) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q) +} + +// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary function as a Querier. 
+type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAllowedGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAllowedGroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q) +} + +// The TraverseUserAllowedGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAllowedGroup func(context.Context, *ent.UserAllowedGroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAllowedGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserAllowedGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAllowedGroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q) +} + +// The UserSubscriptionFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserSubscriptionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserSubscriptionQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q) +} + +// The TraverseUserSubscription type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserSubscription func(context.Context, *ent.UserSubscriptionQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserSubscription) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). 
+func (f TraverseUserSubscription) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserSubscriptionQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q) +} + +// NewQuery returns the generic Query interface for the given typed query. +func NewQuery(q ent.Query) (Query, error) { + switch q := q.(type) { + case *ent.AccountQuery: + return &query[*ent.AccountQuery, predicate.Account, account.OrderOption]{typ: ent.TypeAccount, tq: q}, nil + case *ent.AccountGroupQuery: + return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil + case *ent.ApiKeyQuery: + return &query[*ent.ApiKeyQuery, predicate.ApiKey, apikey.OrderOption]{typ: ent.TypeApiKey, tq: q}, nil + case *ent.GroupQuery: + return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil + case *ent.ProxyQuery: + return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil + case *ent.RedeemCodeQuery: + return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil + case *ent.SettingQuery: + return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil + case *ent.UserQuery: + return &query[*ent.UserQuery, predicate.User, user.OrderOption]{typ: ent.TypeUser, tq: q}, nil + case *ent.UserAllowedGroupQuery: + return &query[*ent.UserAllowedGroupQuery, predicate.UserAllowedGroup, userallowedgroup.OrderOption]{typ: ent.TypeUserAllowedGroup, tq: q}, nil + case *ent.UserSubscriptionQuery: + return &query[*ent.UserSubscriptionQuery, predicate.UserSubscription, usersubscription.OrderOption]{typ: ent.TypeUserSubscription, tq: q}, nil + default: + return nil, fmt.Errorf("unknown query type %T", q) + } +} + +type query[T any, P ~func(*sql.Selector), R ~func(*sql.Selector)] struct { + typ string 
+ tq interface { + Limit(int) T + Offset(int) T + Unique(bool) T + Order(...R) T + Where(...P) T + } +} + +func (q query[T, P, R]) Type() string { + return q.typ +} + +func (q query[T, P, R]) Limit(limit int) { + q.tq.Limit(limit) +} + +func (q query[T, P, R]) Offset(offset int) { + q.tq.Offset(offset) +} + +func (q query[T, P, R]) Unique(unique bool) { + q.tq.Unique(unique) +} + +func (q query[T, P, R]) Order(orders ...func(*sql.Selector)) { + rs := make([]R, len(orders)) + for i := range orders { + rs[i] = orders[i] + } + q.tq.Order(rs...) +} + +func (q query[T, P, R]) WhereP(ps ...func(*sql.Selector)) { + p := make([]P, len(ps)) + for i := range ps { + p[i] = ps[i] + } + q.tq.Where(p...) +} diff --git a/backend/ent/migrate/migrate.go b/backend/ent/migrate/migrate.go new file mode 100644 index 00000000..1956a6bf --- /dev/null +++ b/backend/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. 
+ WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go new file mode 100644 index 00000000..45408760 --- /dev/null +++ b/backend/ent/migrate/schema.go @@ -0,0 +1,568 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // AccountsColumns holds the columns for the "accounts" table. 
+ AccountsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "platform", Type: field.TypeString, Size: 50}, + {Name: "type", Type: field.TypeString, Size: 20}, + {Name: "credentials", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "proxy_id", Type: field.TypeInt64, Nullable: true}, + {Name: "concurrency", Type: field.TypeInt, Default: 3}, + {Name: "priority", Type: field.TypeInt, Default: 50}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "error_message", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "last_used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "schedulable", Type: field.TypeBool, Default: true}, + {Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_status", 
Type: field.TypeString, Nullable: true, Size: 20}, + } + // AccountsTable holds the schema information for the "accounts" table. + AccountsTable = &schema.Table{ + Name: "accounts", + Columns: AccountsColumns, + PrimaryKey: []*schema.Column{AccountsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "account_platform", + Unique: false, + Columns: []*schema.Column{AccountsColumns[5]}, + }, + { + Name: "account_type", + Unique: false, + Columns: []*schema.Column{AccountsColumns[6]}, + }, + { + Name: "account_status", + Unique: false, + Columns: []*schema.Column{AccountsColumns[12]}, + }, + { + Name: "account_proxy_id", + Unique: false, + Columns: []*schema.Column{AccountsColumns[9]}, + }, + { + Name: "account_priority", + Unique: false, + Columns: []*schema.Column{AccountsColumns[11]}, + }, + { + Name: "account_last_used_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[14]}, + }, + { + Name: "account_schedulable", + Unique: false, + Columns: []*schema.Column{AccountsColumns[15]}, + }, + { + Name: "account_rate_limited_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[16]}, + }, + { + Name: "account_rate_limit_reset_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[17]}, + }, + { + Name: "account_overload_until", + Unique: false, + Columns: []*schema.Column{AccountsColumns[18]}, + }, + { + Name: "account_deleted_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[3]}, + }, + }, + } + // AccountGroupsColumns holds the columns for the "account_groups" table. + AccountGroupsColumns = []*schema.Column{ + {Name: "priority", Type: field.TypeInt, Default: 50}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "account_id", Type: field.TypeInt64}, + {Name: "group_id", Type: field.TypeInt64}, + } + // AccountGroupsTable holds the schema information for the "account_groups" table. 
+ AccountGroupsTable = &schema.Table{ + Name: "account_groups", + Columns: AccountGroupsColumns, + PrimaryKey: []*schema.Column{AccountGroupsColumns[2], AccountGroupsColumns[3]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "account_groups_accounts_account", + Columns: []*schema.Column{AccountGroupsColumns[2]}, + RefColumns: []*schema.Column{AccountsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "account_groups_groups_group", + Columns: []*schema.Column{AccountGroupsColumns[3]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "accountgroup_group_id", + Unique: false, + Columns: []*schema.Column{AccountGroupsColumns[3]}, + }, + { + Name: "accountgroup_priority", + Unique: false, + Columns: []*schema.Column{AccountGroupsColumns[0]}, + }, + }, + } + // APIKeysColumns holds the columns for the "api_keys" table. + APIKeysColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "key", Type: field.TypeString, Unique: true, Size: 128}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "user_id", Type: field.TypeInt64}, + } + // APIKeysTable holds the schema information for the "api_keys" table. 
+ APIKeysTable = &schema.Table{ + Name: "api_keys", + Columns: APIKeysColumns, + PrimaryKey: []*schema.Column{APIKeysColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "api_keys_groups_api_keys", + Columns: []*schema.Column{APIKeysColumns[7]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "api_keys_users_api_keys", + Columns: []*schema.Column{APIKeysColumns[8]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "apikey_key", + Unique: true, + Columns: []*schema.Column{APIKeysColumns[4]}, + }, + { + Name: "apikey_user_id", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[8]}, + }, + { + Name: "apikey_group_id", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[7]}, + }, + { + Name: "apikey_status", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[6]}, + }, + { + Name: "apikey_deleted_at", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[3]}, + }, + }, + } + // GroupsColumns holds the columns for the "groups" table. 
+ GroupsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Unique: true, Size: 100}, + {Name: "description", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "is_exclusive", Type: field.TypeBool, Default: false}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "platform", Type: field.TypeString, Size: 50, Default: "anthropic"}, + {Name: "subscription_type", Type: field.TypeString, Size: 20, Default: "standard"}, + {Name: "daily_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "weekly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "monthly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + } + // GroupsTable holds the schema information for the "groups" table. 
+ GroupsTable = &schema.Table{ + Name: "groups", + Columns: GroupsColumns, + PrimaryKey: []*schema.Column{GroupsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "group_name", + Unique: true, + Columns: []*schema.Column{GroupsColumns[4]}, + }, + { + Name: "group_status", + Unique: false, + Columns: []*schema.Column{GroupsColumns[8]}, + }, + { + Name: "group_platform", + Unique: false, + Columns: []*schema.Column{GroupsColumns[9]}, + }, + { + Name: "group_subscription_type", + Unique: false, + Columns: []*schema.Column{GroupsColumns[10]}, + }, + { + Name: "group_is_exclusive", + Unique: false, + Columns: []*schema.Column{GroupsColumns[7]}, + }, + { + Name: "group_deleted_at", + Unique: false, + Columns: []*schema.Column{GroupsColumns[3]}, + }, + }, + } + // ProxiesColumns holds the columns for the "proxies" table. + ProxiesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "protocol", Type: field.TypeString, Size: 20}, + {Name: "host", Type: field.TypeString, Size: 255}, + {Name: "port", Type: field.TypeInt}, + {Name: "username", Type: field.TypeString, Nullable: true, Size: 100}, + {Name: "password", Type: field.TypeString, Nullable: true, Size: 100}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + } + // ProxiesTable holds the schema information for the "proxies" table. 
+ ProxiesTable = &schema.Table{ + Name: "proxies", + Columns: ProxiesColumns, + PrimaryKey: []*schema.Column{ProxiesColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "proxy_status", + Unique: false, + Columns: []*schema.Column{ProxiesColumns[10]}, + }, + { + Name: "proxy_deleted_at", + Unique: false, + Columns: []*schema.Column{ProxiesColumns[3]}, + }, + }, + } + // RedeemCodesColumns holds the columns for the "redeem_codes" table. + RedeemCodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "code", Type: field.TypeString, Unique: true, Size: 32}, + {Name: "type", Type: field.TypeString, Size: 20, Default: "balance"}, + {Name: "value", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "unused"}, + {Name: "used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "validity_days", Type: field.TypeInt, Default: 30}, + {Name: "group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "used_by", Type: field.TypeInt64, Nullable: true}, + } + // RedeemCodesTable holds the schema information for the "redeem_codes" table. 
+ RedeemCodesTable = &schema.Table{ + Name: "redeem_codes", + Columns: RedeemCodesColumns, + PrimaryKey: []*schema.Column{RedeemCodesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "redeem_codes_groups_redeem_codes", + Columns: []*schema.Column{RedeemCodesColumns[9]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "redeem_codes_users_redeem_codes", + Columns: []*schema.Column{RedeemCodesColumns[10]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "redeemcode_code", + Unique: true, + Columns: []*schema.Column{RedeemCodesColumns[1]}, + }, + { + Name: "redeemcode_status", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[4]}, + }, + { + Name: "redeemcode_used_by", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[10]}, + }, + { + Name: "redeemcode_group_id", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[9]}, + }, + }, + } + // SettingsColumns holds the columns for the "settings" table. + SettingsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "key", Type: field.TypeString, Unique: true, Size: 100}, + {Name: "value", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // SettingsTable holds the schema information for the "settings" table. + SettingsTable = &schema.Table{ + Name: "settings", + Columns: SettingsColumns, + PrimaryKey: []*schema.Column{SettingsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "setting_key", + Unique: true, + Columns: []*schema.Column{SettingsColumns[1]}, + }, + }, + } + // UsersColumns holds the columns for the "users" table. 
+ UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "email", Type: field.TypeString, Unique: true, Size: 255}, + {Name: "password_hash", Type: field.TypeString, Size: 255}, + {Name: "role", Type: field.TypeString, Size: 20, Default: "user"}, + {Name: "balance", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "concurrency", Type: field.TypeInt, Default: 5}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "username", Type: field.TypeString, Size: 100, Default: ""}, + {Name: "wechat", Type: field.TypeString, Size: 100, Default: ""}, + {Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + } + // UsersTable holds the schema information for the "users" table. + UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "user_email", + Unique: true, + Columns: []*schema.Column{UsersColumns[4]}, + }, + { + Name: "user_status", + Unique: false, + Columns: []*schema.Column{UsersColumns[9]}, + }, + { + Name: "user_deleted_at", + Unique: false, + Columns: []*schema.Column{UsersColumns[3]}, + }, + }, + } + // UserAllowedGroupsColumns holds the columns for the "user_allowed_groups" table. + UserAllowedGroupsColumns = []*schema.Column{ + {Name: "created_at", Type: field.TypeTime}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "group_id", Type: field.TypeInt64}, + } + // UserAllowedGroupsTable holds the schema information for the "user_allowed_groups" table. 
+ UserAllowedGroupsTable = &schema.Table{ + Name: "user_allowed_groups", + Columns: UserAllowedGroupsColumns, + PrimaryKey: []*schema.Column{UserAllowedGroupsColumns[1], UserAllowedGroupsColumns[2]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_allowed_groups_users_user", + Columns: []*schema.Column{UserAllowedGroupsColumns[1]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_allowed_groups_groups_group", + Columns: []*schema.Column{UserAllowedGroupsColumns[2]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "userallowedgroup_group_id", + Unique: false, + Columns: []*schema.Column{UserAllowedGroupsColumns[2]}, + }, + }, + } + // UserSubscriptionsColumns holds the columns for the "user_subscriptions" table. + UserSubscriptionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "starts_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "expires_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "daily_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "weekly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "monthly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "daily_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "weekly_usage_usd", 
Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "monthly_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "assigned_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "group_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "assigned_by", Type: field.TypeInt64, Nullable: true}, + } + // UserSubscriptionsTable holds the schema information for the "user_subscriptions" table. + UserSubscriptionsTable = &schema.Table{ + Name: "user_subscriptions", + Columns: UserSubscriptionsColumns, + PrimaryKey: []*schema.Column{UserSubscriptionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_subscriptions_groups_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[14]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_subscriptions_users_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[15]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_subscriptions_users_assigned_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[16]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "usersubscription_user_id", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[15]}, + }, + { + Name: "usersubscription_group_id", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[14]}, + }, + { + Name: "usersubscription_status", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[5]}, + }, + { + Name: "usersubscription_expires_at", + Unique: false, + Columns: 
[]*schema.Column{UserSubscriptionsColumns[4]}, + }, + { + Name: "usersubscription_assigned_by", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[16]}, + }, + { + Name: "usersubscription_user_id_group_id", + Unique: true, + Columns: []*schema.Column{UserSubscriptionsColumns[15], UserSubscriptionsColumns[14]}, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + AccountsTable, + AccountGroupsTable, + APIKeysTable, + GroupsTable, + ProxiesTable, + RedeemCodesTable, + SettingsTable, + UsersTable, + UserAllowedGroupsTable, + UserSubscriptionsTable, + } +) + +func init() { + AccountsTable.Annotation = &entsql.Annotation{ + Table: "accounts", + } + AccountGroupsTable.ForeignKeys[0].RefTable = AccountsTable + AccountGroupsTable.ForeignKeys[1].RefTable = GroupsTable + AccountGroupsTable.Annotation = &entsql.Annotation{ + Table: "account_groups", + } + APIKeysTable.ForeignKeys[0].RefTable = GroupsTable + APIKeysTable.ForeignKeys[1].RefTable = UsersTable + APIKeysTable.Annotation = &entsql.Annotation{ + Table: "api_keys", + } + GroupsTable.Annotation = &entsql.Annotation{ + Table: "groups", + } + ProxiesTable.Annotation = &entsql.Annotation{ + Table: "proxies", + } + RedeemCodesTable.ForeignKeys[0].RefTable = GroupsTable + RedeemCodesTable.ForeignKeys[1].RefTable = UsersTable + RedeemCodesTable.Annotation = &entsql.Annotation{ + Table: "redeem_codes", + } + SettingsTable.Annotation = &entsql.Annotation{ + Table: "settings", + } + UsersTable.Annotation = &entsql.Annotation{ + Table: "users", + } + UserAllowedGroupsTable.ForeignKeys[0].RefTable = UsersTable + UserAllowedGroupsTable.ForeignKeys[1].RefTable = GroupsTable + UserAllowedGroupsTable.Annotation = &entsql.Annotation{ + Table: "user_allowed_groups", + } + UserSubscriptionsTable.ForeignKeys[0].RefTable = GroupsTable + UserSubscriptionsTable.ForeignKeys[1].RefTable = UsersTable + UserSubscriptionsTable.ForeignKeys[2].RefTable = UsersTable + 
UserSubscriptionsTable.Annotation = &entsql.Annotation{ + Table: "user_subscriptions", + } +} diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go new file mode 100644 index 00000000..45a6f5a7 --- /dev/null +++ b/backend/ent/mutation.go @@ -0,0 +1,10572 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeAccount = "Account" + TypeAccountGroup = "AccountGroup" + TypeApiKey = "ApiKey" + TypeGroup = "Group" + TypeProxy = "Proxy" + TypeRedeemCode = "RedeemCode" + TypeSetting = "Setting" + TypeUser = "User" + TypeUserAllowedGroup = "UserAllowedGroup" + TypeUserSubscription = "UserSubscription" +) + +// AccountMutation represents an operation that mutates the Account nodes in the graph. 
+type AccountMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + platform *string + _type *string + credentials *map[string]interface{} + extra *map[string]interface{} + proxy_id *int64 + addproxy_id *int64 + concurrency *int + addconcurrency *int + priority *int + addpriority *int + status *string + error_message *string + last_used_at *time.Time + schedulable *bool + rate_limited_at *time.Time + rate_limit_reset_at *time.Time + overload_until *time.Time + session_window_start *time.Time + session_window_end *time.Time + session_window_status *string + clearedFields map[string]struct{} + groups map[int64]struct{} + removedgroups map[int64]struct{} + clearedgroups bool + done bool + oldValue func(context.Context) (*Account, error) + predicates []predicate.Account +} + +var _ ent.Mutation = (*AccountMutation)(nil) + +// accountOption allows management of the mutation configuration using functional options. +type accountOption func(*AccountMutation) + +// newAccountMutation creates new mutation for the Account entity. +func newAccountMutation(c config, op Op, opts ...accountOption) *AccountMutation { + m := &AccountMutation{ + config: c, + op: op, + typ: TypeAccount, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAccountID sets the ID field of the mutation. +func withAccountID(id int64) accountOption { + return func(m *AccountMutation) { + var ( + err error + once sync.Once + value *Account + ) + m.oldValue = func(ctx context.Context) (*Account, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Account.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAccount sets the old Account of the mutation. 
+func withAccount(node *Account) accountOption { + return func(m *AccountMutation) { + m.oldValue = func(context.Context) (*Account, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AccountMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AccountMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AccountMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AccountMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Account.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *AccountMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *AccountMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AccountMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AccountMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AccountMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AccountMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *AccountMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *AccountMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
+func (m *AccountMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[account.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *AccountMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[account.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *AccountMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, account.FieldDeletedAt) +} + +// SetName sets the "name" field. +func (m *AccountMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *AccountMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *AccountMutation) ResetName() { + m.name = nil +} + +// SetPlatform sets the "platform" field. +func (m *AccountMutation) SetPlatform(s string) { + m.platform = &s +} + +// Platform returns the value of the "platform" field in the mutation. 
+func (m *AccountMutation) Platform() (r string, exists bool) { + v := m.platform + if v == nil { + return + } + return *v, true +} + +// OldPlatform returns the old "platform" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldPlatform(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlatform is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlatform requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlatform: %w", err) + } + return oldValue.Platform, nil +} + +// ResetPlatform resets all changes to the "platform" field. +func (m *AccountMutation) ResetPlatform() { + m.platform = nil +} + +// SetType sets the "type" field. +func (m *AccountMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *AccountMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *AccountMutation) ResetType() { + m._type = nil +} + +// SetCredentials sets the "credentials" field. +func (m *AccountMutation) SetCredentials(value map[string]interface{}) { + m.credentials = &value +} + +// Credentials returns the value of the "credentials" field in the mutation. +func (m *AccountMutation) Credentials() (r map[string]interface{}, exists bool) { + v := m.credentials + if v == nil { + return + } + return *v, true +} + +// OldCredentials returns the old "credentials" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldCredentials(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCredentials is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCredentials requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCredentials: %w", err) + } + return oldValue.Credentials, nil +} + +// ResetCredentials resets all changes to the "credentials" field. +func (m *AccountMutation) ResetCredentials() { + m.credentials = nil +} + +// SetExtra sets the "extra" field. 
+func (m *AccountMutation) SetExtra(value map[string]interface{}) { + m.extra = &value +} + +// Extra returns the value of the "extra" field in the mutation. +func (m *AccountMutation) Extra() (r map[string]interface{}, exists bool) { + v := m.extra + if v == nil { + return + } + return *v, true +} + +// OldExtra returns the old "extra" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldExtra(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtra is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtra requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtra: %w", err) + } + return oldValue.Extra, nil +} + +// ResetExtra resets all changes to the "extra" field. +func (m *AccountMutation) ResetExtra() { + m.extra = nil +} + +// SetProxyID sets the "proxy_id" field. +func (m *AccountMutation) SetProxyID(i int64) { + m.proxy_id = &i + m.addproxy_id = nil +} + +// ProxyID returns the value of the "proxy_id" field in the mutation. +func (m *AccountMutation) ProxyID() (r int64, exists bool) { + v := m.proxy_id + if v == nil { + return + } + return *v, true +} + +// OldProxyID returns the old "proxy_id" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldProxyID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProxyID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProxyID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProxyID: %w", err) + } + return oldValue.ProxyID, nil +} + +// AddProxyID adds i to the "proxy_id" field. +func (m *AccountMutation) AddProxyID(i int64) { + if m.addproxy_id != nil { + *m.addproxy_id += i + } else { + m.addproxy_id = &i + } +} + +// AddedProxyID returns the value that was added to the "proxy_id" field in this mutation. +func (m *AccountMutation) AddedProxyID() (r int64, exists bool) { + v := m.addproxy_id + if v == nil { + return + } + return *v, true +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (m *AccountMutation) ClearProxyID() { + m.proxy_id = nil + m.addproxy_id = nil + m.clearedFields[account.FieldProxyID] = struct{}{} +} + +// ProxyIDCleared returns if the "proxy_id" field was cleared in this mutation. +func (m *AccountMutation) ProxyIDCleared() bool { + _, ok := m.clearedFields[account.FieldProxyID] + return ok +} + +// ResetProxyID resets all changes to the "proxy_id" field. +func (m *AccountMutation) ResetProxyID() { + m.proxy_id = nil + m.addproxy_id = nil + delete(m.clearedFields, account.FieldProxyID) +} + +// SetConcurrency sets the "concurrency" field. +func (m *AccountMutation) SetConcurrency(i int) { + m.concurrency = &i + m.addconcurrency = nil +} + +// Concurrency returns the value of the "concurrency" field in the mutation. +func (m *AccountMutation) Concurrency() (r int, exists bool) { + v := m.concurrency + if v == nil { + return + } + return *v, true +} + +// OldConcurrency returns the old "concurrency" field's value of the Account entity. 
+// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldConcurrency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConcurrency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConcurrency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConcurrency: %w", err) + } + return oldValue.Concurrency, nil +} + +// AddConcurrency adds i to the "concurrency" field. +func (m *AccountMutation) AddConcurrency(i int) { + if m.addconcurrency != nil { + *m.addconcurrency += i + } else { + m.addconcurrency = &i + } +} + +// AddedConcurrency returns the value that was added to the "concurrency" field in this mutation. +func (m *AccountMutation) AddedConcurrency() (r int, exists bool) { + v := m.addconcurrency + if v == nil { + return + } + return *v, true +} + +// ResetConcurrency resets all changes to the "concurrency" field. +func (m *AccountMutation) ResetConcurrency() { + m.concurrency = nil + m.addconcurrency = nil +} + +// SetPriority sets the "priority" field. +func (m *AccountMutation) SetPriority(i int) { + m.priority = &i + m.addpriority = nil +} + +// Priority returns the value of the "priority" field in the mutation. +func (m *AccountMutation) Priority() (r int, exists bool) { + v := m.priority + if v == nil { + return + } + return *v, true +} + +// OldPriority returns the old "priority" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldPriority(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPriority is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPriority requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPriority: %w", err) + } + return oldValue.Priority, nil +} + +// AddPriority adds i to the "priority" field. +func (m *AccountMutation) AddPriority(i int) { + if m.addpriority != nil { + *m.addpriority += i + } else { + m.addpriority = &i + } +} + +// AddedPriority returns the value that was added to the "priority" field in this mutation. +func (m *AccountMutation) AddedPriority() (r int, exists bool) { + v := m.addpriority + if v == nil { + return + } + return *v, true +} + +// ResetPriority resets all changes to the "priority" field. +func (m *AccountMutation) ResetPriority() { + m.priority = nil + m.addpriority = nil +} + +// SetStatus sets the "status" field. +func (m *AccountMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *AccountMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *AccountMutation) ResetStatus() { + m.status = nil +} + +// SetErrorMessage sets the "error_message" field. +func (m *AccountMutation) SetErrorMessage(s string) { + m.error_message = &s +} + +// ErrorMessage returns the value of the "error_message" field in the mutation. +func (m *AccountMutation) ErrorMessage() (r string, exists bool) { + v := m.error_message + if v == nil { + return + } + return *v, true +} + +// OldErrorMessage returns the old "error_message" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldErrorMessage(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) + } + return oldValue.ErrorMessage, nil +} + +// ClearErrorMessage clears the value of the "error_message" field. 
+func (m *AccountMutation) ClearErrorMessage() { + m.error_message = nil + m.clearedFields[account.FieldErrorMessage] = struct{}{} +} + +// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. +func (m *AccountMutation) ErrorMessageCleared() bool { + _, ok := m.clearedFields[account.FieldErrorMessage] + return ok +} + +// ResetErrorMessage resets all changes to the "error_message" field. +func (m *AccountMutation) ResetErrorMessage() { + m.error_message = nil + delete(m.clearedFields, account.FieldErrorMessage) +} + +// SetLastUsedAt sets the "last_used_at" field. +func (m *AccountMutation) SetLastUsedAt(t time.Time) { + m.last_used_at = &t +} + +// LastUsedAt returns the value of the "last_used_at" field in the mutation. +func (m *AccountMutation) LastUsedAt() (r time.Time, exists bool) { + v := m.last_used_at + if v == nil { + return + } + return *v, true +} + +// OldLastUsedAt returns the old "last_used_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldLastUsedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastUsedAt: %w", err) + } + return oldValue.LastUsedAt, nil +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (m *AccountMutation) ClearLastUsedAt() { + m.last_used_at = nil + m.clearedFields[account.FieldLastUsedAt] = struct{}{} +} + +// LastUsedAtCleared returns if the "last_used_at" field was cleared in this mutation. 
+func (m *AccountMutation) LastUsedAtCleared() bool { + _, ok := m.clearedFields[account.FieldLastUsedAt] + return ok +} + +// ResetLastUsedAt resets all changes to the "last_used_at" field. +func (m *AccountMutation) ResetLastUsedAt() { + m.last_used_at = nil + delete(m.clearedFields, account.FieldLastUsedAt) +} + +// SetSchedulable sets the "schedulable" field. +func (m *AccountMutation) SetSchedulable(b bool) { + m.schedulable = &b +} + +// Schedulable returns the value of the "schedulable" field in the mutation. +func (m *AccountMutation) Schedulable() (r bool, exists bool) { + v := m.schedulable + if v == nil { + return + } + return *v, true +} + +// OldSchedulable returns the old "schedulable" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSchedulable(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSchedulable is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSchedulable requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSchedulable: %w", err) + } + return oldValue.Schedulable, nil +} + +// ResetSchedulable resets all changes to the "schedulable" field. +func (m *AccountMutation) ResetSchedulable() { + m.schedulable = nil +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (m *AccountMutation) SetRateLimitedAt(t time.Time) { + m.rate_limited_at = &t +} + +// RateLimitedAt returns the value of the "rate_limited_at" field in the mutation. 
+func (m *AccountMutation) RateLimitedAt() (r time.Time, exists bool) { + v := m.rate_limited_at + if v == nil { + return + } + return *v, true +} + +// OldRateLimitedAt returns the old "rate_limited_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldRateLimitedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateLimitedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateLimitedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateLimitedAt: %w", err) + } + return oldValue.RateLimitedAt, nil +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (m *AccountMutation) ClearRateLimitedAt() { + m.rate_limited_at = nil + m.clearedFields[account.FieldRateLimitedAt] = struct{}{} +} + +// RateLimitedAtCleared returns if the "rate_limited_at" field was cleared in this mutation. +func (m *AccountMutation) RateLimitedAtCleared() bool { + _, ok := m.clearedFields[account.FieldRateLimitedAt] + return ok +} + +// ResetRateLimitedAt resets all changes to the "rate_limited_at" field. +func (m *AccountMutation) ResetRateLimitedAt() { + m.rate_limited_at = nil + delete(m.clearedFields, account.FieldRateLimitedAt) +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (m *AccountMutation) SetRateLimitResetAt(t time.Time) { + m.rate_limit_reset_at = &t +} + +// RateLimitResetAt returns the value of the "rate_limit_reset_at" field in the mutation. 
+func (m *AccountMutation) RateLimitResetAt() (r time.Time, exists bool) { + v := m.rate_limit_reset_at + if v == nil { + return + } + return *v, true +} + +// OldRateLimitResetAt returns the old "rate_limit_reset_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldRateLimitResetAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateLimitResetAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateLimitResetAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateLimitResetAt: %w", err) + } + return oldValue.RateLimitResetAt, nil +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (m *AccountMutation) ClearRateLimitResetAt() { + m.rate_limit_reset_at = nil + m.clearedFields[account.FieldRateLimitResetAt] = struct{}{} +} + +// RateLimitResetAtCleared returns if the "rate_limit_reset_at" field was cleared in this mutation. +func (m *AccountMutation) RateLimitResetAtCleared() bool { + _, ok := m.clearedFields[account.FieldRateLimitResetAt] + return ok +} + +// ResetRateLimitResetAt resets all changes to the "rate_limit_reset_at" field. +func (m *AccountMutation) ResetRateLimitResetAt() { + m.rate_limit_reset_at = nil + delete(m.clearedFields, account.FieldRateLimitResetAt) +} + +// SetOverloadUntil sets the "overload_until" field. +func (m *AccountMutation) SetOverloadUntil(t time.Time) { + m.overload_until = &t +} + +// OverloadUntil returns the value of the "overload_until" field in the mutation. 
+func (m *AccountMutation) OverloadUntil() (r time.Time, exists bool) { + v := m.overload_until + if v == nil { + return + } + return *v, true +} + +// OldOverloadUntil returns the old "overload_until" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldOverloadUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOverloadUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOverloadUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOverloadUntil: %w", err) + } + return oldValue.OverloadUntil, nil +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (m *AccountMutation) ClearOverloadUntil() { + m.overload_until = nil + m.clearedFields[account.FieldOverloadUntil] = struct{}{} +} + +// OverloadUntilCleared returns if the "overload_until" field was cleared in this mutation. +func (m *AccountMutation) OverloadUntilCleared() bool { + _, ok := m.clearedFields[account.FieldOverloadUntil] + return ok +} + +// ResetOverloadUntil resets all changes to the "overload_until" field. +func (m *AccountMutation) ResetOverloadUntil() { + m.overload_until = nil + delete(m.clearedFields, account.FieldOverloadUntil) +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (m *AccountMutation) SetSessionWindowStart(t time.Time) { + m.session_window_start = &t +} + +// SessionWindowStart returns the value of the "session_window_start" field in the mutation. 
+func (m *AccountMutation) SessionWindowStart() (r time.Time, exists bool) { + v := m.session_window_start + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowStart returns the old "session_window_start" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowStart: %w", err) + } + return oldValue.SessionWindowStart, nil +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (m *AccountMutation) ClearSessionWindowStart() { + m.session_window_start = nil + m.clearedFields[account.FieldSessionWindowStart] = struct{}{} +} + +// SessionWindowStartCleared returns if the "session_window_start" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowStartCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowStart] + return ok +} + +// ResetSessionWindowStart resets all changes to the "session_window_start" field. +func (m *AccountMutation) ResetSessionWindowStart() { + m.session_window_start = nil + delete(m.clearedFields, account.FieldSessionWindowStart) +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (m *AccountMutation) SetSessionWindowEnd(t time.Time) { + m.session_window_end = &t +} + +// SessionWindowEnd returns the value of the "session_window_end" field in the mutation. 
+func (m *AccountMutation) SessionWindowEnd() (r time.Time, exists bool) { + v := m.session_window_end + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowEnd returns the old "session_window_end" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowEnd(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowEnd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowEnd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowEnd: %w", err) + } + return oldValue.SessionWindowEnd, nil +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (m *AccountMutation) ClearSessionWindowEnd() { + m.session_window_end = nil + m.clearedFields[account.FieldSessionWindowEnd] = struct{}{} +} + +// SessionWindowEndCleared returns if the "session_window_end" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowEndCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowEnd] + return ok +} + +// ResetSessionWindowEnd resets all changes to the "session_window_end" field. +func (m *AccountMutation) ResetSessionWindowEnd() { + m.session_window_end = nil + delete(m.clearedFields, account.FieldSessionWindowEnd) +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (m *AccountMutation) SetSessionWindowStatus(s string) { + m.session_window_status = &s +} + +// SessionWindowStatus returns the value of the "session_window_status" field in the mutation. 
+func (m *AccountMutation) SessionWindowStatus() (r string, exists bool) { + v := m.session_window_status + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowStatus returns the old "session_window_status" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowStatus(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowStatus: %w", err) + } + return oldValue.SessionWindowStatus, nil +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (m *AccountMutation) ClearSessionWindowStatus() { + m.session_window_status = nil + m.clearedFields[account.FieldSessionWindowStatus] = struct{}{} +} + +// SessionWindowStatusCleared returns if the "session_window_status" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowStatusCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowStatus] + return ok +} + +// ResetSessionWindowStatus resets all changes to the "session_window_status" field. +func (m *AccountMutation) ResetSessionWindowStatus() { + m.session_window_status = nil + delete(m.clearedFields, account.FieldSessionWindowStatus) +} + +// AddGroupIDs adds the "groups" edge to the Group entity by ids. 
+func (m *AccountMutation) AddGroupIDs(ids ...int64) { + if m.groups == nil { + m.groups = make(map[int64]struct{}) + } + for i := range ids { + m.groups[ids[i]] = struct{}{} + } +} + +// ClearGroups clears the "groups" edge to the Group entity. +func (m *AccountMutation) ClearGroups() { + m.clearedgroups = true +} + +// GroupsCleared reports if the "groups" edge to the Group entity was cleared. +func (m *AccountMutation) GroupsCleared() bool { + return m.clearedgroups +} + +// RemoveGroupIDs removes the "groups" edge to the Group entity by IDs. +func (m *AccountMutation) RemoveGroupIDs(ids ...int64) { + if m.removedgroups == nil { + m.removedgroups = make(map[int64]struct{}) + } + for i := range ids { + delete(m.groups, ids[i]) + m.removedgroups[ids[i]] = struct{}{} + } +} + +// RemovedGroups returns the removed IDs of the "groups" edge to the Group entity. +func (m *AccountMutation) RemovedGroupsIDs() (ids []int64) { + for id := range m.removedgroups { + ids = append(ids, id) + } + return +} + +// GroupsIDs returns the "groups" edge IDs in the mutation. +func (m *AccountMutation) GroupsIDs() (ids []int64) { + for id := range m.groups { + ids = append(ids, id) + } + return +} + +// ResetGroups resets all changes to the "groups" edge. +func (m *AccountMutation) ResetGroups() { + m.groups = nil + m.clearedgroups = false + m.removedgroups = nil +} + +// Where appends a list predicates to the AccountMutation builder. +func (m *AccountMutation) Where(ps ...predicate.Account) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AccountMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AccountMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Account, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. 
+func (m *AccountMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AccountMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Account). +func (m *AccountMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AccountMutation) Fields() []string { + fields := make([]string, 0, 21) + if m.created_at != nil { + fields = append(fields, account.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, account.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, account.FieldDeletedAt) + } + if m.name != nil { + fields = append(fields, account.FieldName) + } + if m.platform != nil { + fields = append(fields, account.FieldPlatform) + } + if m._type != nil { + fields = append(fields, account.FieldType) + } + if m.credentials != nil { + fields = append(fields, account.FieldCredentials) + } + if m.extra != nil { + fields = append(fields, account.FieldExtra) + } + if m.proxy_id != nil { + fields = append(fields, account.FieldProxyID) + } + if m.concurrency != nil { + fields = append(fields, account.FieldConcurrency) + } + if m.priority != nil { + fields = append(fields, account.FieldPriority) + } + if m.status != nil { + fields = append(fields, account.FieldStatus) + } + if m.error_message != nil { + fields = append(fields, account.FieldErrorMessage) + } + if m.last_used_at != nil { + fields = append(fields, account.FieldLastUsedAt) + } + if m.schedulable != nil { + fields = append(fields, account.FieldSchedulable) + } + if m.rate_limited_at != nil { + fields = append(fields, account.FieldRateLimitedAt) + } + if m.rate_limit_reset_at != nil { + fields = append(fields, account.FieldRateLimitResetAt) + } + if m.overload_until != nil { + fields = append(fields, 
account.FieldOverloadUntil) + } + if m.session_window_start != nil { + fields = append(fields, account.FieldSessionWindowStart) + } + if m.session_window_end != nil { + fields = append(fields, account.FieldSessionWindowEnd) + } + if m.session_window_status != nil { + fields = append(fields, account.FieldSessionWindowStatus) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AccountMutation) Field(name string) (ent.Value, bool) { + switch name { + case account.FieldCreatedAt: + return m.CreatedAt() + case account.FieldUpdatedAt: + return m.UpdatedAt() + case account.FieldDeletedAt: + return m.DeletedAt() + case account.FieldName: + return m.Name() + case account.FieldPlatform: + return m.Platform() + case account.FieldType: + return m.GetType() + case account.FieldCredentials: + return m.Credentials() + case account.FieldExtra: + return m.Extra() + case account.FieldProxyID: + return m.ProxyID() + case account.FieldConcurrency: + return m.Concurrency() + case account.FieldPriority: + return m.Priority() + case account.FieldStatus: + return m.Status() + case account.FieldErrorMessage: + return m.ErrorMessage() + case account.FieldLastUsedAt: + return m.LastUsedAt() + case account.FieldSchedulable: + return m.Schedulable() + case account.FieldRateLimitedAt: + return m.RateLimitedAt() + case account.FieldRateLimitResetAt: + return m.RateLimitResetAt() + case account.FieldOverloadUntil: + return m.OverloadUntil() + case account.FieldSessionWindowStart: + return m.SessionWindowStart() + case account.FieldSessionWindowEnd: + return m.SessionWindowEnd() + case account.FieldSessionWindowStatus: + return m.SessionWindowStatus() + } + return nil, false +} + +// OldField returns the old value of the field from the database. 
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AccountMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case account.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case account.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case account.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case account.FieldName: + return m.OldName(ctx) + case account.FieldPlatform: + return m.OldPlatform(ctx) + case account.FieldType: + return m.OldType(ctx) + case account.FieldCredentials: + return m.OldCredentials(ctx) + case account.FieldExtra: + return m.OldExtra(ctx) + case account.FieldProxyID: + return m.OldProxyID(ctx) + case account.FieldConcurrency: + return m.OldConcurrency(ctx) + case account.FieldPriority: + return m.OldPriority(ctx) + case account.FieldStatus: + return m.OldStatus(ctx) + case account.FieldErrorMessage: + return m.OldErrorMessage(ctx) + case account.FieldLastUsedAt: + return m.OldLastUsedAt(ctx) + case account.FieldSchedulable: + return m.OldSchedulable(ctx) + case account.FieldRateLimitedAt: + return m.OldRateLimitedAt(ctx) + case account.FieldRateLimitResetAt: + return m.OldRateLimitResetAt(ctx) + case account.FieldOverloadUntil: + return m.OldOverloadUntil(ctx) + case account.FieldSessionWindowStart: + return m.OldSessionWindowStart(ctx) + case account.FieldSessionWindowEnd: + return m.OldSessionWindowEnd(ctx) + case account.FieldSessionWindowStatus: + return m.OldSessionWindowStatus(ctx) + } + return nil, fmt.Errorf("unknown Account field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AccountMutation) SetField(name string, value ent.Value) error { + switch name { + case account.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case account.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case account.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case account.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case account.FieldPlatform: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlatform(v) + return nil + case account.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case account.FieldCredentials: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCredentials(v) + return nil + case account.FieldExtra: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtra(v) + return nil + case account.FieldProxyID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProxyID(v) + return nil + case account.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConcurrency(v) + return nil + case account.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPriority(v) + return nil 
+ case account.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case account.FieldErrorMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorMessage(v) + return nil + case account.FieldLastUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastUsedAt(v) + return nil + case account.FieldSchedulable: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSchedulable(v) + return nil + case account.FieldRateLimitedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateLimitedAt(v) + return nil + case account.FieldRateLimitResetAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateLimitResetAt(v) + return nil + case account.FieldOverloadUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOverloadUntil(v) + return nil + case account.FieldSessionWindowStart: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowStart(v) + return nil + case account.FieldSessionWindowEnd: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowEnd(v) + return nil + case account.FieldSessionWindowStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowStatus(v) + return nil + } + return fmt.Errorf("unknown Account field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *AccountMutation) AddedFields() []string { + var fields []string + if m.addproxy_id != nil { + fields = append(fields, account.FieldProxyID) + } + if m.addconcurrency != nil { + fields = append(fields, account.FieldConcurrency) + } + if m.addpriority != nil { + fields = append(fields, account.FieldPriority) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AccountMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case account.FieldProxyID: + return m.AddedProxyID() + case account.FieldConcurrency: + return m.AddedConcurrency() + case account.FieldPriority: + return m.AddedPriority() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccountMutation) AddField(name string, value ent.Value) error { + switch name { + case account.FieldProxyID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddProxyID(v) + return nil + case account.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddConcurrency(v) + return nil + case account.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPriority(v) + return nil + } + return fmt.Errorf("unknown Account numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *AccountMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(account.FieldDeletedAt) { + fields = append(fields, account.FieldDeletedAt) + } + if m.FieldCleared(account.FieldProxyID) { + fields = append(fields, account.FieldProxyID) + } + if m.FieldCleared(account.FieldErrorMessage) { + fields = append(fields, account.FieldErrorMessage) + } + if m.FieldCleared(account.FieldLastUsedAt) { + fields = append(fields, account.FieldLastUsedAt) + } + if m.FieldCleared(account.FieldRateLimitedAt) { + fields = append(fields, account.FieldRateLimitedAt) + } + if m.FieldCleared(account.FieldRateLimitResetAt) { + fields = append(fields, account.FieldRateLimitResetAt) + } + if m.FieldCleared(account.FieldOverloadUntil) { + fields = append(fields, account.FieldOverloadUntil) + } + if m.FieldCleared(account.FieldSessionWindowStart) { + fields = append(fields, account.FieldSessionWindowStart) + } + if m.FieldCleared(account.FieldSessionWindowEnd) { + fields = append(fields, account.FieldSessionWindowEnd) + } + if m.FieldCleared(account.FieldSessionWindowStatus) { + fields = append(fields, account.FieldSessionWindowStatus) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AccountMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *AccountMutation) ClearField(name string) error { + switch name { + case account.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case account.FieldProxyID: + m.ClearProxyID() + return nil + case account.FieldErrorMessage: + m.ClearErrorMessage() + return nil + case account.FieldLastUsedAt: + m.ClearLastUsedAt() + return nil + case account.FieldRateLimitedAt: + m.ClearRateLimitedAt() + return nil + case account.FieldRateLimitResetAt: + m.ClearRateLimitResetAt() + return nil + case account.FieldOverloadUntil: + m.ClearOverloadUntil() + return nil + case account.FieldSessionWindowStart: + m.ClearSessionWindowStart() + return nil + case account.FieldSessionWindowEnd: + m.ClearSessionWindowEnd() + return nil + case account.FieldSessionWindowStatus: + m.ClearSessionWindowStatus() + return nil + } + return fmt.Errorf("unknown Account nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *AccountMutation) ResetField(name string) error { + switch name { + case account.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case account.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case account.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case account.FieldName: + m.ResetName() + return nil + case account.FieldPlatform: + m.ResetPlatform() + return nil + case account.FieldType: + m.ResetType() + return nil + case account.FieldCredentials: + m.ResetCredentials() + return nil + case account.FieldExtra: + m.ResetExtra() + return nil + case account.FieldProxyID: + m.ResetProxyID() + return nil + case account.FieldConcurrency: + m.ResetConcurrency() + return nil + case account.FieldPriority: + m.ResetPriority() + return nil + case account.FieldStatus: + m.ResetStatus() + return nil + case account.FieldErrorMessage: + m.ResetErrorMessage() + return nil + case account.FieldLastUsedAt: + m.ResetLastUsedAt() + return nil + case account.FieldSchedulable: + m.ResetSchedulable() + return nil + case account.FieldRateLimitedAt: + m.ResetRateLimitedAt() + return nil + case account.FieldRateLimitResetAt: + m.ResetRateLimitResetAt() + return nil + case account.FieldOverloadUntil: + m.ResetOverloadUntil() + return nil + case account.FieldSessionWindowStart: + m.ResetSessionWindowStart() + return nil + case account.FieldSessionWindowEnd: + m.ResetSessionWindowEnd() + return nil + case account.FieldSessionWindowStatus: + m.ResetSessionWindowStatus() + return nil + } + return fmt.Errorf("unknown Account field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AccountMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.groups != nil { + edges = append(edges, account.EdgeGroups) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *AccountMutation) AddedIDs(name string) []ent.Value { + switch name { + case account.EdgeGroups: + ids := make([]ent.Value, 0, len(m.groups)) + for id := range m.groups { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AccountMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedgroups != nil { + edges = append(edges, account.EdgeGroups) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AccountMutation) RemovedIDs(name string) []ent.Value { + switch name { + case account.EdgeGroups: + ids := make([]ent.Value, 0, len(m.removedgroups)) + for id := range m.removedgroups { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AccountMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedgroups { + edges = append(edges, account.EdgeGroups) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AccountMutation) EdgeCleared(name string) bool { + switch name { + case account.EdgeGroups: + return m.clearedgroups + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AccountMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Account unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *AccountMutation) ResetEdge(name string) error { + switch name { + case account.EdgeGroups: + m.ResetGroups() + return nil + } + return fmt.Errorf("unknown Account edge %s", name) +} + +// AccountGroupMutation represents an operation that mutates the AccountGroup nodes in the graph. +type AccountGroupMutation struct { + config + op Op + typ string + priority *int + addpriority *int + created_at *time.Time + clearedFields map[string]struct{} + account *int64 + clearedaccount bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*AccountGroup, error) + predicates []predicate.AccountGroup +} + +var _ ent.Mutation = (*AccountGroupMutation)(nil) + +// accountgroupOption allows management of the mutation configuration using functional options. +type accountgroupOption func(*AccountGroupMutation) + +// newAccountGroupMutation creates new mutation for the AccountGroup entity. +func newAccountGroupMutation(c config, op Op, opts ...accountgroupOption) *AccountGroupMutation { + m := &AccountGroupMutation{ + config: c, + op: op, + typ: TypeAccountGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AccountGroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AccountGroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetAccountID sets the "account_id" field. 
+func (m *AccountGroupMutation) SetAccountID(i int64) { + m.account = &i +} + +// AccountID returns the value of the "account_id" field in the mutation. +func (m *AccountGroupMutation) AccountID() (r int64, exists bool) { + v := m.account + if v == nil { + return + } + return *v, true +} + +// ResetAccountID resets all changes to the "account_id" field. +func (m *AccountGroupMutation) ResetAccountID() { + m.account = nil +} + +// SetGroupID sets the "group_id" field. +func (m *AccountGroupMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *AccountGroupMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *AccountGroupMutation) ResetGroupID() { + m.group = nil +} + +// SetPriority sets the "priority" field. +func (m *AccountGroupMutation) SetPriority(i int) { + m.priority = &i + m.addpriority = nil +} + +// Priority returns the value of the "priority" field in the mutation. +func (m *AccountGroupMutation) Priority() (r int, exists bool) { + v := m.priority + if v == nil { + return + } + return *v, true +} + +// AddPriority adds i to the "priority" field. +func (m *AccountGroupMutation) AddPriority(i int) { + if m.addpriority != nil { + *m.addpriority += i + } else { + m.addpriority = &i + } +} + +// AddedPriority returns the value that was added to the "priority" field in this mutation. +func (m *AccountGroupMutation) AddedPriority() (r int, exists bool) { + v := m.addpriority + if v == nil { + return + } + return *v, true +} + +// ResetPriority resets all changes to the "priority" field. +func (m *AccountGroupMutation) ResetPriority() { + m.priority = nil + m.addpriority = nil +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *AccountGroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AccountGroupMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AccountGroupMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearAccount clears the "account" edge to the Account entity. +func (m *AccountGroupMutation) ClearAccount() { + m.clearedaccount = true + m.clearedFields[accountgroup.FieldAccountID] = struct{}{} +} + +// AccountCleared reports if the "account" edge to the Account entity was cleared. +func (m *AccountGroupMutation) AccountCleared() bool { + return m.clearedaccount +} + +// AccountIDs returns the "account" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AccountID instead. It exists only for internal usage by the builders. +func (m *AccountGroupMutation) AccountIDs() (ids []int64) { + if id := m.account; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAccount resets all changes to the "account" edge. +func (m *AccountGroupMutation) ResetAccount() { + m.account = nil + m.clearedaccount = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *AccountGroupMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[accountgroup.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *AccountGroupMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. 
+func (m *AccountGroupMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *AccountGroupMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// Where appends a list predicates to the AccountGroupMutation builder. +func (m *AccountGroupMutation) Where(ps ...predicate.AccountGroup) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AccountGroupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AccountGroupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AccountGroup, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AccountGroupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AccountGroupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AccountGroup). +func (m *AccountGroupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AccountGroupMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.account != nil { + fields = append(fields, accountgroup.FieldAccountID) + } + if m.group != nil { + fields = append(fields, accountgroup.FieldGroupID) + } + if m.priority != nil { + fields = append(fields, accountgroup.FieldPriority) + } + if m.created_at != nil { + fields = append(fields, accountgroup.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *AccountGroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case accountgroup.FieldAccountID: + return m.AccountID() + case accountgroup.FieldGroupID: + return m.GroupID() + case accountgroup.FieldPriority: + return m.Priority() + case accountgroup.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AccountGroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + return nil, errors.New("edge schema AccountGroup does not support getting old values") +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccountGroupMutation) SetField(name string, value ent.Value) error { + switch name { + case accountgroup.FieldAccountID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccountID(v) + return nil + case accountgroup.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case accountgroup.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPriority(v) + return nil + case accountgroup.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown AccountGroup field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *AccountGroupMutation) AddedFields() []string { + var fields []string + if m.addpriority != nil { + fields = append(fields, accountgroup.FieldPriority) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AccountGroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case accountgroup.FieldPriority: + return m.AddedPriority() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccountGroupMutation) AddField(name string, value ent.Value) error { + switch name { + case accountgroup.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPriority(v) + return nil + } + return fmt.Errorf("unknown AccountGroup numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AccountGroupMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AccountGroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AccountGroupMutation) ClearField(name string) error { + return fmt.Errorf("unknown AccountGroup nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *AccountGroupMutation) ResetField(name string) error { + switch name { + case accountgroup.FieldAccountID: + m.ResetAccountID() + return nil + case accountgroup.FieldGroupID: + m.ResetGroupID() + return nil + case accountgroup.FieldPriority: + m.ResetPriority() + return nil + case accountgroup.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown AccountGroup field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AccountGroupMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.account != nil { + edges = append(edges, accountgroup.EdgeAccount) + } + if m.group != nil { + edges = append(edges, accountgroup.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AccountGroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case accountgroup.EdgeAccount: + if id := m.account; id != nil { + return []ent.Value{*id} + } + case accountgroup.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AccountGroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AccountGroupMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *AccountGroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedaccount { + edges = append(edges, accountgroup.EdgeAccount) + } + if m.clearedgroup { + edges = append(edges, accountgroup.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AccountGroupMutation) EdgeCleared(name string) bool { + switch name { + case accountgroup.EdgeAccount: + return m.clearedaccount + case accountgroup.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AccountGroupMutation) ClearEdge(name string) error { + switch name { + case accountgroup.EdgeAccount: + m.ClearAccount() + return nil + case accountgroup.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown AccountGroup unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AccountGroupMutation) ResetEdge(name string) error { + switch name { + case accountgroup.EdgeAccount: + m.ResetAccount() + return nil + case accountgroup.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown AccountGroup edge %s", name) +} + +// ApiKeyMutation represents an operation that mutates the ApiKey nodes in the graph. 
+type ApiKeyMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + key *string + name *string + status *string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*ApiKey, error) + predicates []predicate.ApiKey +} + +var _ ent.Mutation = (*ApiKeyMutation)(nil) + +// apikeyOption allows management of the mutation configuration using functional options. +type apikeyOption func(*ApiKeyMutation) + +// newApiKeyMutation creates new mutation for the ApiKey entity. +func newApiKeyMutation(c config, op Op, opts ...apikeyOption) *ApiKeyMutation { + m := &ApiKeyMutation{ + config: c, + op: op, + typ: TypeApiKey, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withApiKeyID sets the ID field of the mutation. +func withApiKeyID(id int64) apikeyOption { + return func(m *ApiKeyMutation) { + var ( + err error + once sync.Once + value *ApiKey + ) + m.oldValue = func(ctx context.Context) (*ApiKey, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ApiKey.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withApiKey sets the old ApiKey of the mutation. +func withApiKey(node *ApiKey) apikeyOption { + return func(m *ApiKeyMutation) { + m.oldValue = func(context.Context) (*ApiKey, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ApiKeyMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m ApiKeyMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ApiKeyMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ApiKeyMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ApiKey.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *ApiKeyMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *ApiKeyMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ApiKeyMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ApiKeyMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ApiKeyMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ApiKeyMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ApiKeyMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ApiKeyMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (m *ApiKeyMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *ApiKeyMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ApiKeyMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *ApiKeyMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[apikey.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *ApiKeyMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[apikey.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *ApiKeyMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, apikey.FieldDeletedAt) +} + +// SetUserID sets the "user_id" field. +func (m *ApiKeyMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. 
+func (m *ApiKeyMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ApiKeyMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *ApiKeyMutation) ResetUserID() { + m.user = nil +} + +// SetKey sets the "key" field. +func (m *ApiKeyMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the value of the "key" field in the mutation. +func (m *ApiKeyMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old "key" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ApiKeyMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey resets all changes to the "key" field. +func (m *ApiKeyMutation) ResetKey() { + m.key = nil +} + +// SetName sets the "name" field. +func (m *ApiKeyMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ApiKeyMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ApiKeyMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ApiKeyMutation) ResetName() { + m.name = nil +} + +// SetGroupID sets the "group_id" field. +func (m *ApiKeyMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. 
+func (m *ApiKeyMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ApiKeyMutation) OldGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ClearGroupID clears the value of the "group_id" field. +func (m *ApiKeyMutation) ClearGroupID() { + m.group = nil + m.clearedFields[apikey.FieldGroupID] = struct{}{} +} + +// GroupIDCleared returns if the "group_id" field was cleared in this mutation. +func (m *ApiKeyMutation) GroupIDCleared() bool { + _, ok := m.clearedFields[apikey.FieldGroupID] + return ok +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *ApiKeyMutation) ResetGroupID() { + m.group = nil + delete(m.clearedFields, apikey.FieldGroupID) +} + +// SetStatus sets the "status" field. +func (m *ApiKeyMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *ApiKeyMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the ApiKey entity. +// If the ApiKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ApiKeyMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *ApiKeyMutation) ResetStatus() { + m.status = nil +} + +// ClearUser clears the "user" edge to the User entity. +func (m *ApiKeyMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[apikey.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *ApiKeyMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *ApiKeyMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *ApiKeyMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *ApiKeyMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[apikey.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *ApiKeyMutation) GroupCleared() bool { + return m.GroupIDCleared() || m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. 
+func (m *ApiKeyMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *ApiKeyMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// Where appends a list predicates to the ApiKeyMutation builder. +func (m *ApiKeyMutation) Where(ps ...predicate.ApiKey) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ApiKeyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ApiKeyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ApiKey, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ApiKeyMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ApiKeyMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ApiKey). +func (m *ApiKeyMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *ApiKeyMutation) Fields() []string { + fields := make([]string, 0, 8) + if m.created_at != nil { + fields = append(fields, apikey.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, apikey.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, apikey.FieldDeletedAt) + } + if m.user != nil { + fields = append(fields, apikey.FieldUserID) + } + if m.key != nil { + fields = append(fields, apikey.FieldKey) + } + if m.name != nil { + fields = append(fields, apikey.FieldName) + } + if m.group != nil { + fields = append(fields, apikey.FieldGroupID) + } + if m.status != nil { + fields = append(fields, apikey.FieldStatus) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ApiKeyMutation) Field(name string) (ent.Value, bool) { + switch name { + case apikey.FieldCreatedAt: + return m.CreatedAt() + case apikey.FieldUpdatedAt: + return m.UpdatedAt() + case apikey.FieldDeletedAt: + return m.DeletedAt() + case apikey.FieldUserID: + return m.UserID() + case apikey.FieldKey: + return m.Key() + case apikey.FieldName: + return m.Name() + case apikey.FieldGroupID: + return m.GroupID() + case apikey.FieldStatus: + return m.Status() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *ApiKeyMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case apikey.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case apikey.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case apikey.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case apikey.FieldUserID: + return m.OldUserID(ctx) + case apikey.FieldKey: + return m.OldKey(ctx) + case apikey.FieldName: + return m.OldName(ctx) + case apikey.FieldGroupID: + return m.OldGroupID(ctx) + case apikey.FieldStatus: + return m.OldStatus(ctx) + } + return nil, fmt.Errorf("unknown ApiKey field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ApiKeyMutation) SetField(name string, value ent.Value) error { + switch name { + case apikey.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case apikey.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case apikey.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case apikey.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case apikey.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case apikey.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case apikey.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", 
value, name) + } + m.SetGroupID(v) + return nil + case apikey.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + } + return fmt.Errorf("unknown ApiKey field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ApiKeyMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ApiKeyMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ApiKeyMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown ApiKey numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ApiKeyMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(apikey.FieldDeletedAt) { + fields = append(fields, apikey.FieldDeletedAt) + } + if m.FieldCleared(apikey.FieldGroupID) { + fields = append(fields, apikey.FieldGroupID) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ApiKeyMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *ApiKeyMutation) ClearField(name string) error { + switch name { + case apikey.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case apikey.FieldGroupID: + m.ClearGroupID() + return nil + } + return fmt.Errorf("unknown ApiKey nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *ApiKeyMutation) ResetField(name string) error { + switch name { + case apikey.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case apikey.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case apikey.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case apikey.FieldUserID: + m.ResetUserID() + return nil + case apikey.FieldKey: + m.ResetKey() + return nil + case apikey.FieldName: + m.ResetName() + return nil + case apikey.FieldGroupID: + m.ResetGroupID() + return nil + case apikey.FieldStatus: + m.ResetStatus() + return nil + } + return fmt.Errorf("unknown ApiKey field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ApiKeyMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, apikey.EdgeUser) + } + if m.group != nil { + edges = append(edges, apikey.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ApiKeyMutation) AddedIDs(name string) []ent.Value { + switch name { + case apikey.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case apikey.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *ApiKeyMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ApiKeyMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ApiKeyMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, apikey.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, apikey.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ApiKeyMutation) EdgeCleared(name string) bool { + switch name { + case apikey.EdgeUser: + return m.cleareduser + case apikey.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ApiKeyMutation) ClearEdge(name string) error { + switch name { + case apikey.EdgeUser: + m.ClearUser() + return nil + case apikey.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown ApiKey unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ApiKeyMutation) ResetEdge(name string) error { + switch name { + case apikey.EdgeUser: + m.ResetUser() + return nil + case apikey.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown ApiKey edge %s", name) +} + +// GroupMutation represents an operation that mutates the Group nodes in the graph. 
+type GroupMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + description *string + rate_multiplier *float64 + addrate_multiplier *float64 + is_exclusive *bool + status *string + platform *string + subscription_type *string + daily_limit_usd *float64 + adddaily_limit_usd *float64 + weekly_limit_usd *float64 + addweekly_limit_usd *float64 + monthly_limit_usd *float64 + addmonthly_limit_usd *float64 + clearedFields map[string]struct{} + api_keys map[int64]struct{} + removedapi_keys map[int64]struct{} + clearedapi_keys bool + redeem_codes map[int64]struct{} + removedredeem_codes map[int64]struct{} + clearedredeem_codes bool + subscriptions map[int64]struct{} + removedsubscriptions map[int64]struct{} + clearedsubscriptions bool + accounts map[int64]struct{} + removedaccounts map[int64]struct{} + clearedaccounts bool + allowed_users map[int64]struct{} + removedallowed_users map[int64]struct{} + clearedallowed_users bool + done bool + oldValue func(context.Context) (*Group, error) + predicates []predicate.Group +} + +var _ ent.Mutation = (*GroupMutation)(nil) + +// groupOption allows management of the mutation configuration using functional options. +type groupOption func(*GroupMutation) + +// newGroupMutation creates new mutation for the Group entity. +func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation { + m := &GroupMutation{ + config: c, + op: op, + typ: TypeGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withGroupID sets the ID field of the mutation. 
+func withGroupID(id int64) groupOption { + return func(m *GroupMutation) { + var ( + err error + once sync.Once + value *Group + ) + m.oldValue = func(ctx context.Context) (*Group, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Group.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withGroup sets the old Group of the mutation. +func withGroup(node *Group) groupOption { + return func(m *GroupMutation) { + m.oldValue = func(context.Context) (*Group, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m GroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m GroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *GroupMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *GroupMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Group.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *GroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *GroupMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *GroupMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *GroupMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. 
+func (m *GroupMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *GroupMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *GroupMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *GroupMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *GroupMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[group.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *GroupMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[group.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *GroupMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, group.FieldDeletedAt) +} + +// SetName sets the "name" field. +func (m *GroupMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *GroupMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *GroupMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *GroupMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *GroupMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDescription(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *GroupMutation) ClearDescription() { + m.description = nil + m.clearedFields[group.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. 
+func (m *GroupMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[group.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *GroupMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, group.FieldDescription) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (m *GroupMutation) SetRateMultiplier(f float64) { + m.rate_multiplier = &f + m.addrate_multiplier = nil +} + +// RateMultiplier returns the value of the "rate_multiplier" field in the mutation. +func (m *GroupMutation) RateMultiplier() (r float64, exists bool) { + v := m.rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldRateMultiplier returns the old "rate_multiplier" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err) + } + return oldValue.RateMultiplier, nil +} + +// AddRateMultiplier adds f to the "rate_multiplier" field. +func (m *GroupMutation) AddRateMultiplier(f float64) { + if m.addrate_multiplier != nil { + *m.addrate_multiplier += f + } else { + m.addrate_multiplier = &f + } +} + +// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation. 
+func (m *GroupMutation) AddedRateMultiplier() (r float64, exists bool) { + v := m.addrate_multiplier + if v == nil { + return + } + return *v, true +} + +// ResetRateMultiplier resets all changes to the "rate_multiplier" field. +func (m *GroupMutation) ResetRateMultiplier() { + m.rate_multiplier = nil + m.addrate_multiplier = nil +} + +// SetIsExclusive sets the "is_exclusive" field. +func (m *GroupMutation) SetIsExclusive(b bool) { + m.is_exclusive = &b +} + +// IsExclusive returns the value of the "is_exclusive" field in the mutation. +func (m *GroupMutation) IsExclusive() (r bool, exists bool) { + v := m.is_exclusive + if v == nil { + return + } + return *v, true +} + +// OldIsExclusive returns the old "is_exclusive" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldIsExclusive(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsExclusive is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsExclusive requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsExclusive: %w", err) + } + return oldValue.IsExclusive, nil +} + +// ResetIsExclusive resets all changes to the "is_exclusive" field. +func (m *GroupMutation) ResetIsExclusive() { + m.is_exclusive = nil +} + +// SetStatus sets the "status" field. +func (m *GroupMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *GroupMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Group entity. 
+// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *GroupMutation) ResetStatus() { + m.status = nil +} + +// SetPlatform sets the "platform" field. +func (m *GroupMutation) SetPlatform(s string) { + m.platform = &s +} + +// Platform returns the value of the "platform" field in the mutation. +func (m *GroupMutation) Platform() (r string, exists bool) { + v := m.platform + if v == nil { + return + } + return *v, true +} + +// OldPlatform returns the old "platform" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldPlatform(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlatform is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlatform requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlatform: %w", err) + } + return oldValue.Platform, nil +} + +// ResetPlatform resets all changes to the "platform" field. 
+func (m *GroupMutation) ResetPlatform() { + m.platform = nil +} + +// SetSubscriptionType sets the "subscription_type" field. +func (m *GroupMutation) SetSubscriptionType(s string) { + m.subscription_type = &s +} + +// SubscriptionType returns the value of the "subscription_type" field in the mutation. +func (m *GroupMutation) SubscriptionType() (r string, exists bool) { + v := m.subscription_type + if v == nil { + return + } + return *v, true +} + +// OldSubscriptionType returns the old "subscription_type" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldSubscriptionType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSubscriptionType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSubscriptionType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSubscriptionType: %w", err) + } + return oldValue.SubscriptionType, nil +} + +// ResetSubscriptionType resets all changes to the "subscription_type" field. +func (m *GroupMutation) ResetSubscriptionType() { + m.subscription_type = nil +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (m *GroupMutation) SetDailyLimitUsd(f float64) { + m.daily_limit_usd = &f + m.adddaily_limit_usd = nil +} + +// DailyLimitUsd returns the value of the "daily_limit_usd" field in the mutation. +func (m *GroupMutation) DailyLimitUsd() (r float64, exists bool) { + v := m.daily_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldDailyLimitUsd returns the old "daily_limit_usd" field's value of the Group entity. 
+// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDailyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyLimitUsd: %w", err) + } + return oldValue.DailyLimitUsd, nil +} + +// AddDailyLimitUsd adds f to the "daily_limit_usd" field. +func (m *GroupMutation) AddDailyLimitUsd(f float64) { + if m.adddaily_limit_usd != nil { + *m.adddaily_limit_usd += f + } else { + m.adddaily_limit_usd = &f + } +} + +// AddedDailyLimitUsd returns the value that was added to the "daily_limit_usd" field in this mutation. +func (m *GroupMutation) AddedDailyLimitUsd() (r float64, exists bool) { + v := m.adddaily_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (m *GroupMutation) ClearDailyLimitUsd() { + m.daily_limit_usd = nil + m.adddaily_limit_usd = nil + m.clearedFields[group.FieldDailyLimitUsd] = struct{}{} +} + +// DailyLimitUsdCleared returns if the "daily_limit_usd" field was cleared in this mutation. +func (m *GroupMutation) DailyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldDailyLimitUsd] + return ok +} + +// ResetDailyLimitUsd resets all changes to the "daily_limit_usd" field. +func (m *GroupMutation) ResetDailyLimitUsd() { + m.daily_limit_usd = nil + m.adddaily_limit_usd = nil + delete(m.clearedFields, group.FieldDailyLimitUsd) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. 
+func (m *GroupMutation) SetWeeklyLimitUsd(f float64) { + m.weekly_limit_usd = &f + m.addweekly_limit_usd = nil +} + +// WeeklyLimitUsd returns the value of the "weekly_limit_usd" field in the mutation. +func (m *GroupMutation) WeeklyLimitUsd() (r float64, exists bool) { + v := m.weekly_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldWeeklyLimitUsd returns the old "weekly_limit_usd" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldWeeklyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyLimitUsd: %w", err) + } + return oldValue.WeeklyLimitUsd, nil +} + +// AddWeeklyLimitUsd adds f to the "weekly_limit_usd" field. +func (m *GroupMutation) AddWeeklyLimitUsd(f float64) { + if m.addweekly_limit_usd != nil { + *m.addweekly_limit_usd += f + } else { + m.addweekly_limit_usd = &f + } +} + +// AddedWeeklyLimitUsd returns the value that was added to the "weekly_limit_usd" field in this mutation. +func (m *GroupMutation) AddedWeeklyLimitUsd() (r float64, exists bool) { + v := m.addweekly_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (m *GroupMutation) ClearWeeklyLimitUsd() { + m.weekly_limit_usd = nil + m.addweekly_limit_usd = nil + m.clearedFields[group.FieldWeeklyLimitUsd] = struct{}{} +} + +// WeeklyLimitUsdCleared returns if the "weekly_limit_usd" field was cleared in this mutation. 
+func (m *GroupMutation) WeeklyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldWeeklyLimitUsd] + return ok +} + +// ResetWeeklyLimitUsd resets all changes to the "weekly_limit_usd" field. +func (m *GroupMutation) ResetWeeklyLimitUsd() { + m.weekly_limit_usd = nil + m.addweekly_limit_usd = nil + delete(m.clearedFields, group.FieldWeeklyLimitUsd) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (m *GroupMutation) SetMonthlyLimitUsd(f float64) { + m.monthly_limit_usd = &f + m.addmonthly_limit_usd = nil +} + +// MonthlyLimitUsd returns the value of the "monthly_limit_usd" field in the mutation. +func (m *GroupMutation) MonthlyLimitUsd() (r float64, exists bool) { + v := m.monthly_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldMonthlyLimitUsd returns the old "monthly_limit_usd" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldMonthlyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyLimitUsd: %w", err) + } + return oldValue.MonthlyLimitUsd, nil +} + +// AddMonthlyLimitUsd adds f to the "monthly_limit_usd" field. +func (m *GroupMutation) AddMonthlyLimitUsd(f float64) { + if m.addmonthly_limit_usd != nil { + *m.addmonthly_limit_usd += f + } else { + m.addmonthly_limit_usd = &f + } +} + +// AddedMonthlyLimitUsd returns the value that was added to the "monthly_limit_usd" field in this mutation. 
+func (m *GroupMutation) AddedMonthlyLimitUsd() (r float64, exists bool) { + v := m.addmonthly_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (m *GroupMutation) ClearMonthlyLimitUsd() { + m.monthly_limit_usd = nil + m.addmonthly_limit_usd = nil + m.clearedFields[group.FieldMonthlyLimitUsd] = struct{}{} +} + +// MonthlyLimitUsdCleared returns if the "monthly_limit_usd" field was cleared in this mutation. +func (m *GroupMutation) MonthlyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldMonthlyLimitUsd] + return ok +} + +// ResetMonthlyLimitUsd resets all changes to the "monthly_limit_usd" field. +func (m *GroupMutation) ResetMonthlyLimitUsd() { + m.monthly_limit_usd = nil + m.addmonthly_limit_usd = nil + delete(m.clearedFields, group.FieldMonthlyLimitUsd) +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by ids. +func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { + if m.api_keys == nil { + m.api_keys = make(map[int64]struct{}) + } + for i := range ids { + m.api_keys[ids[i]] = struct{}{} + } +} + +// ClearAPIKeys clears the "api_keys" edge to the ApiKey entity. +func (m *GroupMutation) ClearAPIKeys() { + m.clearedapi_keys = true +} + +// APIKeysCleared reports if the "api_keys" edge to the ApiKey entity was cleared. +func (m *GroupMutation) APIKeysCleared() bool { + return m.clearedapi_keys +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to the ApiKey entity by IDs. +func (m *GroupMutation) RemoveAPIKeyIDs(ids ...int64) { + if m.removedapi_keys == nil { + m.removedapi_keys = make(map[int64]struct{}) + } + for i := range ids { + delete(m.api_keys, ids[i]) + m.removedapi_keys[ids[i]] = struct{}{} + } +} + +// RemovedAPIKeys returns the removed IDs of the "api_keys" edge to the ApiKey entity. 
+func (m *GroupMutation) RemovedAPIKeysIDs() (ids []int64) { + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return +} + +// APIKeysIDs returns the "api_keys" edge IDs in the mutation. +func (m *GroupMutation) APIKeysIDs() (ids []int64) { + for id := range m.api_keys { + ids = append(ids, id) + } + return +} + +// ResetAPIKeys resets all changes to the "api_keys" edge. +func (m *GroupMutation) ResetAPIKeys() { + m.api_keys = nil + m.clearedapi_keys = false + m.removedapi_keys = nil +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by ids. +func (m *GroupMutation) AddRedeemCodeIDs(ids ...int64) { + if m.redeem_codes == nil { + m.redeem_codes = make(map[int64]struct{}) + } + for i := range ids { + m.redeem_codes[ids[i]] = struct{}{} + } +} + +// ClearRedeemCodes clears the "redeem_codes" edge to the RedeemCode entity. +func (m *GroupMutation) ClearRedeemCodes() { + m.clearedredeem_codes = true +} + +// RedeemCodesCleared reports if the "redeem_codes" edge to the RedeemCode entity was cleared. +func (m *GroupMutation) RedeemCodesCleared() bool { + return m.clearedredeem_codes +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to the RedeemCode entity by IDs. +func (m *GroupMutation) RemoveRedeemCodeIDs(ids ...int64) { + if m.removedredeem_codes == nil { + m.removedredeem_codes = make(map[int64]struct{}) + } + for i := range ids { + delete(m.redeem_codes, ids[i]) + m.removedredeem_codes[ids[i]] = struct{}{} + } +} + +// RemovedRedeemCodes returns the removed IDs of the "redeem_codes" edge to the RedeemCode entity. +func (m *GroupMutation) RemovedRedeemCodesIDs() (ids []int64) { + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return +} + +// RedeemCodesIDs returns the "redeem_codes" edge IDs in the mutation. 
+func (m *GroupMutation) RedeemCodesIDs() (ids []int64) { + for id := range m.redeem_codes { + ids = append(ids, id) + } + return +} + +// ResetRedeemCodes resets all changes to the "redeem_codes" edge. +func (m *GroupMutation) ResetRedeemCodes() { + m.redeem_codes = nil + m.clearedredeem_codes = false + m.removedredeem_codes = nil +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by ids. +func (m *GroupMutation) AddSubscriptionIDs(ids ...int64) { + if m.subscriptions == nil { + m.subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.subscriptions[ids[i]] = struct{}{} + } +} + +// ClearSubscriptions clears the "subscriptions" edge to the UserSubscription entity. +func (m *GroupMutation) ClearSubscriptions() { + m.clearedsubscriptions = true +} + +// SubscriptionsCleared reports if the "subscriptions" edge to the UserSubscription entity was cleared. +func (m *GroupMutation) SubscriptionsCleared() bool { + return m.clearedsubscriptions +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to the UserSubscription entity by IDs. +func (m *GroupMutation) RemoveSubscriptionIDs(ids ...int64) { + if m.removedsubscriptions == nil { + m.removedsubscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.subscriptions, ids[i]) + m.removedsubscriptions[ids[i]] = struct{}{} + } +} + +// RemovedSubscriptions returns the removed IDs of the "subscriptions" edge to the UserSubscription entity. +func (m *GroupMutation) RemovedSubscriptionsIDs() (ids []int64) { + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return +} + +// SubscriptionsIDs returns the "subscriptions" edge IDs in the mutation. +func (m *GroupMutation) SubscriptionsIDs() (ids []int64) { + for id := range m.subscriptions { + ids = append(ids, id) + } + return +} + +// ResetSubscriptions resets all changes to the "subscriptions" edge. 
+func (m *GroupMutation) ResetSubscriptions() { + m.subscriptions = nil + m.clearedsubscriptions = false + m.removedsubscriptions = nil +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by ids. +func (m *GroupMutation) AddAccountIDs(ids ...int64) { + if m.accounts == nil { + m.accounts = make(map[int64]struct{}) + } + for i := range ids { + m.accounts[ids[i]] = struct{}{} + } +} + +// ClearAccounts clears the "accounts" edge to the Account entity. +func (m *GroupMutation) ClearAccounts() { + m.clearedaccounts = true +} + +// AccountsCleared reports if the "accounts" edge to the Account entity was cleared. +func (m *GroupMutation) AccountsCleared() bool { + return m.clearedaccounts +} + +// RemoveAccountIDs removes the "accounts" edge to the Account entity by IDs. +func (m *GroupMutation) RemoveAccountIDs(ids ...int64) { + if m.removedaccounts == nil { + m.removedaccounts = make(map[int64]struct{}) + } + for i := range ids { + delete(m.accounts, ids[i]) + m.removedaccounts[ids[i]] = struct{}{} + } +} + +// RemovedAccounts returns the removed IDs of the "accounts" edge to the Account entity. +func (m *GroupMutation) RemovedAccountsIDs() (ids []int64) { + for id := range m.removedaccounts { + ids = append(ids, id) + } + return +} + +// AccountsIDs returns the "accounts" edge IDs in the mutation. +func (m *GroupMutation) AccountsIDs() (ids []int64) { + for id := range m.accounts { + ids = append(ids, id) + } + return +} + +// ResetAccounts resets all changes to the "accounts" edge. +func (m *GroupMutation) ResetAccounts() { + m.accounts = nil + m.clearedaccounts = false + m.removedaccounts = nil +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by ids. 
+func (m *GroupMutation) AddAllowedUserIDs(ids ...int64) { + if m.allowed_users == nil { + m.allowed_users = make(map[int64]struct{}) + } + for i := range ids { + m.allowed_users[ids[i]] = struct{}{} + } +} + +// ClearAllowedUsers clears the "allowed_users" edge to the User entity. +func (m *GroupMutation) ClearAllowedUsers() { + m.clearedallowed_users = true +} + +// AllowedUsersCleared reports if the "allowed_users" edge to the User entity was cleared. +func (m *GroupMutation) AllowedUsersCleared() bool { + return m.clearedallowed_users +} + +// RemoveAllowedUserIDs removes the "allowed_users" edge to the User entity by IDs. +func (m *GroupMutation) RemoveAllowedUserIDs(ids ...int64) { + if m.removedallowed_users == nil { + m.removedallowed_users = make(map[int64]struct{}) + } + for i := range ids { + delete(m.allowed_users, ids[i]) + m.removedallowed_users[ids[i]] = struct{}{} + } +} + +// RemovedAllowedUsers returns the removed IDs of the "allowed_users" edge to the User entity. +func (m *GroupMutation) RemovedAllowedUsersIDs() (ids []int64) { + for id := range m.removedallowed_users { + ids = append(ids, id) + } + return +} + +// AllowedUsersIDs returns the "allowed_users" edge IDs in the mutation. +func (m *GroupMutation) AllowedUsersIDs() (ids []int64) { + for id := range m.allowed_users { + ids = append(ids, id) + } + return +} + +// ResetAllowedUsers resets all changes to the "allowed_users" edge. +func (m *GroupMutation) ResetAllowedUsers() { + m.allowed_users = nil + m.clearedallowed_users = false + m.removedallowed_users = nil +} + +// Where appends a list predicates to the GroupMutation builder. +func (m *GroupMutation) Where(ps ...predicate.Group) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the GroupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. 
+func (m *GroupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Group, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *GroupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *GroupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Group). +func (m *GroupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *GroupMutation) Fields() []string { + fields := make([]string, 0, 13) + if m.created_at != nil { + fields = append(fields, group.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, group.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, group.FieldDeletedAt) + } + if m.name != nil { + fields = append(fields, group.FieldName) + } + if m.description != nil { + fields = append(fields, group.FieldDescription) + } + if m.rate_multiplier != nil { + fields = append(fields, group.FieldRateMultiplier) + } + if m.is_exclusive != nil { + fields = append(fields, group.FieldIsExclusive) + } + if m.status != nil { + fields = append(fields, group.FieldStatus) + } + if m.platform != nil { + fields = append(fields, group.FieldPlatform) + } + if m.subscription_type != nil { + fields = append(fields, group.FieldSubscriptionType) + } + if m.daily_limit_usd != nil { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.weekly_limit_usd != nil { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.monthly_limit_usd != nil { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *GroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case group.FieldCreatedAt: + return m.CreatedAt() + case group.FieldUpdatedAt: + return m.UpdatedAt() + case group.FieldDeletedAt: + return m.DeletedAt() + case group.FieldName: + return m.Name() + case group.FieldDescription: + return m.Description() + case group.FieldRateMultiplier: + return m.RateMultiplier() + case group.FieldIsExclusive: + return m.IsExclusive() + case group.FieldStatus: + return m.Status() + case group.FieldPlatform: + return m.Platform() + case group.FieldSubscriptionType: + return m.SubscriptionType() + case group.FieldDailyLimitUsd: + return m.DailyLimitUsd() + case group.FieldWeeklyLimitUsd: + return m.WeeklyLimitUsd() + case group.FieldMonthlyLimitUsd: + return m.MonthlyLimitUsd() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case group.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case group.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case group.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case group.FieldName: + return m.OldName(ctx) + case group.FieldDescription: + return m.OldDescription(ctx) + case group.FieldRateMultiplier: + return m.OldRateMultiplier(ctx) + case group.FieldIsExclusive: + return m.OldIsExclusive(ctx) + case group.FieldStatus: + return m.OldStatus(ctx) + case group.FieldPlatform: + return m.OldPlatform(ctx) + case group.FieldSubscriptionType: + return m.OldSubscriptionType(ctx) + case group.FieldDailyLimitUsd: + return m.OldDailyLimitUsd(ctx) + case group.FieldWeeklyLimitUsd: + return m.OldWeeklyLimitUsd(ctx) + case group.FieldMonthlyLimitUsd: + return m.OldMonthlyLimitUsd(ctx) + } + return nil, fmt.Errorf("unknown Group field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *GroupMutation) SetField(name string, value ent.Value) error { + switch name { + case group.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case group.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case group.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case group.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case group.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case group.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateMultiplier(v) + return nil + case group.FieldIsExclusive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsExclusive(v) + return nil + case group.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case group.FieldPlatform: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlatform(v) + return nil + case group.FieldSubscriptionType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSubscriptionType(v) + return nil + case group.FieldDailyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDailyLimitUsd(v) + return nil 
+ case group.FieldWeeklyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWeeklyLimitUsd(v) + return nil + case group.FieldMonthlyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonthlyLimitUsd(v) + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *GroupMutation) AddedFields() []string { + var fields []string + if m.addrate_multiplier != nil { + fields = append(fields, group.FieldRateMultiplier) + } + if m.adddaily_limit_usd != nil { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.addweekly_limit_usd != nil { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.addmonthly_limit_usd != nil { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *GroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case group.FieldRateMultiplier: + return m.AddedRateMultiplier() + case group.FieldDailyLimitUsd: + return m.AddedDailyLimitUsd() + case group.FieldWeeklyLimitUsd: + return m.AddedWeeklyLimitUsd() + case group.FieldMonthlyLimitUsd: + return m.AddedMonthlyLimitUsd() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *GroupMutation) AddField(name string, value ent.Value) error { + switch name { + case group.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRateMultiplier(v) + return nil + case group.FieldDailyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDailyLimitUsd(v) + return nil + case group.FieldWeeklyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddWeeklyLimitUsd(v) + return nil + case group.FieldMonthlyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddMonthlyLimitUsd(v) + return nil + } + return fmt.Errorf("unknown Group numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *GroupMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(group.FieldDeletedAt) { + fields = append(fields, group.FieldDeletedAt) + } + if m.FieldCleared(group.FieldDescription) { + fields = append(fields, group.FieldDescription) + } + if m.FieldCleared(group.FieldDailyLimitUsd) { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.FieldCleared(group.FieldWeeklyLimitUsd) { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.FieldCleared(group.FieldMonthlyLimitUsd) { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *GroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *GroupMutation) ClearField(name string) error { + switch name { + case group.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case group.FieldDescription: + m.ClearDescription() + return nil + case group.FieldDailyLimitUsd: + m.ClearDailyLimitUsd() + return nil + case group.FieldWeeklyLimitUsd: + m.ClearWeeklyLimitUsd() + return nil + case group.FieldMonthlyLimitUsd: + m.ClearMonthlyLimitUsd() + return nil + } + return fmt.Errorf("unknown Group nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *GroupMutation) ResetField(name string) error { + switch name { + case group.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case group.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case group.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case group.FieldName: + m.ResetName() + return nil + case group.FieldDescription: + m.ResetDescription() + return nil + case group.FieldRateMultiplier: + m.ResetRateMultiplier() + return nil + case group.FieldIsExclusive: + m.ResetIsExclusive() + return nil + case group.FieldStatus: + m.ResetStatus() + return nil + case group.FieldPlatform: + m.ResetPlatform() + return nil + case group.FieldSubscriptionType: + m.ResetSubscriptionType() + return nil + case group.FieldDailyLimitUsd: + m.ResetDailyLimitUsd() + return nil + case group.FieldWeeklyLimitUsd: + m.ResetWeeklyLimitUsd() + return nil + case group.FieldMonthlyLimitUsd: + m.ResetMonthlyLimitUsd() + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *GroupMutation) AddedEdges() []string { + edges := make([]string, 0, 5) + if m.api_keys != nil { + edges = append(edges, group.EdgeAPIKeys) + } + if m.redeem_codes != nil { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.subscriptions != nil { + edges = append(edges, group.EdgeSubscriptions) + } + if m.accounts != nil { + edges = append(edges, group.EdgeAccounts) + } + if m.allowed_users != nil { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *GroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case group.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.api_keys)) + for id := range m.api_keys { + ids = append(ids, id) + } + return ids + case group.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.redeem_codes)) + for id := range m.redeem_codes { + ids = append(ids, id) + } + return ids + case group.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.subscriptions)) + for id := range m.subscriptions { + ids = append(ids, id) + } + return ids + case group.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.accounts)) + for id := range m.accounts { + ids = append(ids, id) + } + return ids + case group.EdgeAllowedUsers: + ids := make([]ent.Value, 0, len(m.allowed_users)) + for id := range m.allowed_users { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *GroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 5) + if m.removedapi_keys != nil { + edges = append(edges, group.EdgeAPIKeys) + } + if m.removedredeem_codes != nil { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.removedsubscriptions != nil { + edges = append(edges, group.EdgeSubscriptions) + } + if m.removedaccounts != nil { + edges = append(edges, group.EdgeAccounts) + } + if m.removedallowed_users != nil { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *GroupMutation) RemovedIDs(name string) []ent.Value { + switch name { + case group.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.removedapi_keys)) + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return ids + case group.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.removedredeem_codes)) + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return ids + case group.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.removedsubscriptions)) + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return ids + case group.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.removedaccounts)) + for id := range m.removedaccounts { + ids = append(ids, id) + } + return ids + case group.EdgeAllowedUsers: + ids := make([]ent.Value, 0, len(m.removedallowed_users)) + for id := range m.removedallowed_users { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *GroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 5) + if m.clearedapi_keys { + edges = append(edges, group.EdgeAPIKeys) + } + if m.clearedredeem_codes { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.clearedsubscriptions { + edges = append(edges, group.EdgeSubscriptions) + } + if m.clearedaccounts { + edges = append(edges, group.EdgeAccounts) + } + if m.clearedallowed_users { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *GroupMutation) EdgeCleared(name string) bool { + switch name { + case group.EdgeAPIKeys: + return m.clearedapi_keys + case group.EdgeRedeemCodes: + return m.clearedredeem_codes + case group.EdgeSubscriptions: + return m.clearedsubscriptions + case group.EdgeAccounts: + return m.clearedaccounts + case group.EdgeAllowedUsers: + return m.clearedallowed_users + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *GroupMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Group unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *GroupMutation) ResetEdge(name string) error { + switch name { + case group.EdgeAPIKeys: + m.ResetAPIKeys() + return nil + case group.EdgeRedeemCodes: + m.ResetRedeemCodes() + return nil + case group.EdgeSubscriptions: + m.ResetSubscriptions() + return nil + case group.EdgeAccounts: + m.ResetAccounts() + return nil + case group.EdgeAllowedUsers: + m.ResetAllowedUsers() + return nil + } + return fmt.Errorf("unknown Group edge %s", name) +} + +// ProxyMutation represents an operation that mutates the Proxy nodes in the graph. 
+type ProxyMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + protocol *string + host *string + port *int + addport *int + username *string + password *string + status *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Proxy, error) + predicates []predicate.Proxy +} + +var _ ent.Mutation = (*ProxyMutation)(nil) + +// proxyOption allows management of the mutation configuration using functional options. +type proxyOption func(*ProxyMutation) + +// newProxyMutation creates new mutation for the Proxy entity. +func newProxyMutation(c config, op Op, opts ...proxyOption) *ProxyMutation { + m := &ProxyMutation{ + config: c, + op: op, + typ: TypeProxy, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withProxyID sets the ID field of the mutation. +func withProxyID(id int64) proxyOption { + return func(m *ProxyMutation) { + var ( + err error + once sync.Once + value *Proxy + ) + m.oldValue = func(ctx context.Context) (*Proxy, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Proxy.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withProxy sets the old Proxy of the mutation. +func withProxy(node *Proxy) proxyOption { + return func(m *ProxyMutation) { + m.oldValue = func(context.Context) (*Proxy, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ProxyMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m ProxyMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ProxyMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ProxyMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Proxy.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *ProxyMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *ProxyMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ProxyMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ProxyMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ProxyMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ProxyMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (m *ProxyMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *ProxyMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *ProxyMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[proxy.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *ProxyMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[proxy.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *ProxyMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, proxy.FieldDeletedAt) +} + +// SetName sets the "name" field. +func (m *ProxyMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ProxyMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Proxy entity. 
+// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ProxyMutation) ResetName() { + m.name = nil +} + +// SetProtocol sets the "protocol" field. +func (m *ProxyMutation) SetProtocol(s string) { + m.protocol = &s +} + +// Protocol returns the value of the "protocol" field in the mutation. +func (m *ProxyMutation) Protocol() (r string, exists bool) { + v := m.protocol + if v == nil { + return + } + return *v, true +} + +// OldProtocol returns the old "protocol" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldProtocol(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProtocol is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProtocol requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProtocol: %w", err) + } + return oldValue.Protocol, nil +} + +// ResetProtocol resets all changes to the "protocol" field. +func (m *ProxyMutation) ResetProtocol() { + m.protocol = nil +} + +// SetHost sets the "host" field. 
+func (m *ProxyMutation) SetHost(s string) { + m.host = &s +} + +// Host returns the value of the "host" field in the mutation. +func (m *ProxyMutation) Host() (r string, exists bool) { + v := m.host + if v == nil { + return + } + return *v, true +} + +// OldHost returns the old "host" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldHost(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHost: %w", err) + } + return oldValue.Host, nil +} + +// ResetHost resets all changes to the "host" field. +func (m *ProxyMutation) ResetHost() { + m.host = nil +} + +// SetPort sets the "port" field. +func (m *ProxyMutation) SetPort(i int) { + m.port = &i + m.addport = nil +} + +// Port returns the value of the "port" field in the mutation. +func (m *ProxyMutation) Port() (r int, exists bool) { + v := m.port + if v == nil { + return + } + return *v, true +} + +// OldPort returns the old "port" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldPort(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPort is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPort requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPort: %w", err) + } + return oldValue.Port, nil +} + +// AddPort adds i to the "port" field. +func (m *ProxyMutation) AddPort(i int) { + if m.addport != nil { + *m.addport += i + } else { + m.addport = &i + } +} + +// AddedPort returns the value that was added to the "port" field in this mutation. +func (m *ProxyMutation) AddedPort() (r int, exists bool) { + v := m.addport + if v == nil { + return + } + return *v, true +} + +// ResetPort resets all changes to the "port" field. +func (m *ProxyMutation) ResetPort() { + m.port = nil + m.addport = nil +} + +// SetUsername sets the "username" field. +func (m *ProxyMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *ProxyMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldUsername(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ClearUsername clears the value of the "username" field. +func (m *ProxyMutation) ClearUsername() { + m.username = nil + m.clearedFields[proxy.FieldUsername] = struct{}{} +} + +// UsernameCleared returns if the "username" field was cleared in this mutation. +func (m *ProxyMutation) UsernameCleared() bool { + _, ok := m.clearedFields[proxy.FieldUsername] + return ok +} + +// ResetUsername resets all changes to the "username" field. +func (m *ProxyMutation) ResetUsername() { + m.username = nil + delete(m.clearedFields, proxy.FieldUsername) +} + +// SetPassword sets the "password" field. +func (m *ProxyMutation) SetPassword(s string) { + m.password = &s +} + +// Password returns the value of the "password" field in the mutation. +func (m *ProxyMutation) Password() (r string, exists bool) { + v := m.password + if v == nil { + return + } + return *v, true +} + +// OldPassword returns the old "password" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldPassword(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPassword is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPassword requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPassword: %w", err) + } + return oldValue.Password, nil +} + +// ClearPassword clears the value of the "password" field. +func (m *ProxyMutation) ClearPassword() { + m.password = nil + m.clearedFields[proxy.FieldPassword] = struct{}{} +} + +// PasswordCleared returns if the "password" field was cleared in this mutation. +func (m *ProxyMutation) PasswordCleared() bool { + _, ok := m.clearedFields[proxy.FieldPassword] + return ok +} + +// ResetPassword resets all changes to the "password" field. +func (m *ProxyMutation) ResetPassword() { + m.password = nil + delete(m.clearedFields, proxy.FieldPassword) +} + +// SetStatus sets the "status" field. +func (m *ProxyMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *ProxyMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *ProxyMutation) ResetStatus() { + m.status = nil +} + +// Where appends a list predicates to the ProxyMutation builder. +func (m *ProxyMutation) Where(ps ...predicate.Proxy) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ProxyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ProxyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Proxy, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ProxyMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ProxyMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Proxy). +func (m *ProxyMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *ProxyMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.created_at != nil { + fields = append(fields, proxy.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, proxy.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, proxy.FieldDeletedAt) + } + if m.name != nil { + fields = append(fields, proxy.FieldName) + } + if m.protocol != nil { + fields = append(fields, proxy.FieldProtocol) + } + if m.host != nil { + fields = append(fields, proxy.FieldHost) + } + if m.port != nil { + fields = append(fields, proxy.FieldPort) + } + if m.username != nil { + fields = append(fields, proxy.FieldUsername) + } + if m.password != nil { + fields = append(fields, proxy.FieldPassword) + } + if m.status != nil { + fields = append(fields, proxy.FieldStatus) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ProxyMutation) Field(name string) (ent.Value, bool) { + switch name { + case proxy.FieldCreatedAt: + return m.CreatedAt() + case proxy.FieldUpdatedAt: + return m.UpdatedAt() + case proxy.FieldDeletedAt: + return m.DeletedAt() + case proxy.FieldName: + return m.Name() + case proxy.FieldProtocol: + return m.Protocol() + case proxy.FieldHost: + return m.Host() + case proxy.FieldPort: + return m.Port() + case proxy.FieldUsername: + return m.Username() + case proxy.FieldPassword: + return m.Password() + case proxy.FieldStatus: + return m.Status() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *ProxyMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case proxy.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case proxy.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case proxy.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case proxy.FieldName: + return m.OldName(ctx) + case proxy.FieldProtocol: + return m.OldProtocol(ctx) + case proxy.FieldHost: + return m.OldHost(ctx) + case proxy.FieldPort: + return m.OldPort(ctx) + case proxy.FieldUsername: + return m.OldUsername(ctx) + case proxy.FieldPassword: + return m.OldPassword(ctx) + case proxy.FieldStatus: + return m.OldStatus(ctx) + } + return nil, fmt.Errorf("unknown Proxy field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ProxyMutation) SetField(name string, value ent.Value) error { + switch name { + case proxy.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case proxy.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case proxy.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case proxy.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case proxy.FieldProtocol: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProtocol(v) + return nil + case proxy.FieldHost: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHost(v) + return nil + case 
proxy.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPort(v) + return nil + case proxy.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case proxy.FieldPassword: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPassword(v) + return nil + case proxy.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + } + return fmt.Errorf("unknown Proxy field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ProxyMutation) AddedFields() []string { + var fields []string + if m.addport != nil { + fields = append(fields, proxy.FieldPort) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ProxyMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case proxy.FieldPort: + return m.AddedPort() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ProxyMutation) AddField(name string, value ent.Value) error { + switch name { + case proxy.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPort(v) + return nil + } + return fmt.Errorf("unknown Proxy numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *ProxyMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(proxy.FieldDeletedAt) { + fields = append(fields, proxy.FieldDeletedAt) + } + if m.FieldCleared(proxy.FieldUsername) { + fields = append(fields, proxy.FieldUsername) + } + if m.FieldCleared(proxy.FieldPassword) { + fields = append(fields, proxy.FieldPassword) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ProxyMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ProxyMutation) ClearField(name string) error { + switch name { + case proxy.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case proxy.FieldUsername: + m.ClearUsername() + return nil + case proxy.FieldPassword: + m.ClearPassword() + return nil + } + return fmt.Errorf("unknown Proxy nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *ProxyMutation) ResetField(name string) error { + switch name { + case proxy.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case proxy.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case proxy.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case proxy.FieldName: + m.ResetName() + return nil + case proxy.FieldProtocol: + m.ResetProtocol() + return nil + case proxy.FieldHost: + m.ResetHost() + return nil + case proxy.FieldPort: + m.ResetPort() + return nil + case proxy.FieldUsername: + m.ResetUsername() + return nil + case proxy.FieldPassword: + m.ResetPassword() + return nil + case proxy.FieldStatus: + m.ResetStatus() + return nil + } + return fmt.Errorf("unknown Proxy field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ProxyMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ProxyMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ProxyMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ProxyMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ProxyMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ProxyMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *ProxyMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Proxy unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ProxyMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Proxy edge %s", name) +} + +// RedeemCodeMutation represents an operation that mutates the RedeemCode nodes in the graph. +type RedeemCodeMutation struct { + config + op Op + typ string + id *int64 + code *string + _type *string + value *float64 + addvalue *float64 + status *string + used_at *time.Time + notes *string + created_at *time.Time + validity_days *int + addvalidity_days *int + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*RedeemCode, error) + predicates []predicate.RedeemCode +} + +var _ ent.Mutation = (*RedeemCodeMutation)(nil) + +// redeemcodeOption allows management of the mutation configuration using functional options. +type redeemcodeOption func(*RedeemCodeMutation) + +// newRedeemCodeMutation creates new mutation for the RedeemCode entity. +func newRedeemCodeMutation(c config, op Op, opts ...redeemcodeOption) *RedeemCodeMutation { + m := &RedeemCodeMutation{ + config: c, + op: op, + typ: TypeRedeemCode, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withRedeemCodeID sets the ID field of the mutation. 
+func withRedeemCodeID(id int64) redeemcodeOption { + return func(m *RedeemCodeMutation) { + var ( + err error + once sync.Once + value *RedeemCode + ) + m.oldValue = func(ctx context.Context) (*RedeemCode, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().RedeemCode.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withRedeemCode sets the old RedeemCode of the mutation. +func withRedeemCode(node *RedeemCode) redeemcodeOption { + return func(m *RedeemCodeMutation) { + m.oldValue = func(context.Context) (*RedeemCode, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m RedeemCodeMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m RedeemCodeMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *RedeemCodeMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *RedeemCodeMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().RedeemCode.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCode sets the "code" field. +func (m *RedeemCodeMutation) SetCode(s string) { + m.code = &s +} + +// Code returns the value of the "code" field in the mutation. +func (m *RedeemCodeMutation) Code() (r string, exists bool) { + v := m.code + if v == nil { + return + } + return *v, true +} + +// OldCode returns the old "code" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldCode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCode: %w", err) + } + return oldValue.Code, nil +} + +// ResetCode resets all changes to the "code" field. +func (m *RedeemCodeMutation) ResetCode() { + m.code = nil +} + +// SetType sets the "type" field. +func (m *RedeemCodeMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *RedeemCodeMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the RedeemCode entity. 
+// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *RedeemCodeMutation) ResetType() { + m._type = nil +} + +// SetValue sets the "value" field. +func (m *RedeemCodeMutation) SetValue(f float64) { + m.value = &f + m.addvalue = nil +} + +// Value returns the value of the "value" field in the mutation. +func (m *RedeemCodeMutation) Value() (r float64, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldValue(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// AddValue adds f to the "value" field. 
+func (m *RedeemCodeMutation) AddValue(f float64) { + if m.addvalue != nil { + *m.addvalue += f + } else { + m.addvalue = &f + } +} + +// AddedValue returns the value that was added to the "value" field in this mutation. +func (m *RedeemCodeMutation) AddedValue() (r float64, exists bool) { + v := m.addvalue + if v == nil { + return + } + return *v, true +} + +// ResetValue resets all changes to the "value" field. +func (m *RedeemCodeMutation) ResetValue() { + m.value = nil + m.addvalue = nil +} + +// SetStatus sets the "status" field. +func (m *RedeemCodeMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *RedeemCodeMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *RedeemCodeMutation) ResetStatus() { + m.status = nil +} + +// SetUsedBy sets the "used_by" field. +func (m *RedeemCodeMutation) SetUsedBy(i int64) { + m.user = &i +} + +// UsedBy returns the value of the "used_by" field in the mutation. 
+func (m *RedeemCodeMutation) UsedBy() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUsedBy returns the old "used_by" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldUsedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedBy: %w", err) + } + return oldValue.UsedBy, nil +} + +// ClearUsedBy clears the value of the "used_by" field. +func (m *RedeemCodeMutation) ClearUsedBy() { + m.user = nil + m.clearedFields[redeemcode.FieldUsedBy] = struct{}{} +} + +// UsedByCleared returns if the "used_by" field was cleared in this mutation. +func (m *RedeemCodeMutation) UsedByCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldUsedBy] + return ok +} + +// ResetUsedBy resets all changes to the "used_by" field. +func (m *RedeemCodeMutation) ResetUsedBy() { + m.user = nil + delete(m.clearedFields, redeemcode.FieldUsedBy) +} + +// SetUsedAt sets the "used_at" field. +func (m *RedeemCodeMutation) SetUsedAt(t time.Time) { + m.used_at = &t +} + +// UsedAt returns the value of the "used_at" field in the mutation. +func (m *RedeemCodeMutation) UsedAt() (r time.Time, exists bool) { + v := m.used_at + if v == nil { + return + } + return *v, true +} + +// OldUsedAt returns the old "used_at" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldUsedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedAt: %w", err) + } + return oldValue.UsedAt, nil +} + +// ClearUsedAt clears the value of the "used_at" field. +func (m *RedeemCodeMutation) ClearUsedAt() { + m.used_at = nil + m.clearedFields[redeemcode.FieldUsedAt] = struct{}{} +} + +// UsedAtCleared returns if the "used_at" field was cleared in this mutation. +func (m *RedeemCodeMutation) UsedAtCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldUsedAt] + return ok +} + +// ResetUsedAt resets all changes to the "used_at" field. +func (m *RedeemCodeMutation) ResetUsedAt() { + m.used_at = nil + delete(m.clearedFields, redeemcode.FieldUsedAt) +} + +// SetNotes sets the "notes" field. +func (m *RedeemCodeMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *RedeemCodeMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RedeemCodeMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *RedeemCodeMutation) ClearNotes() { + m.notes = nil + m.clearedFields[redeemcode.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *RedeemCodeMutation) NotesCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *RedeemCodeMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, redeemcode.FieldNotes) +} + +// SetCreatedAt sets the "created_at" field. +func (m *RedeemCodeMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *RedeemCodeMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RedeemCodeMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *RedeemCodeMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetGroupID sets the "group_id" field. +func (m *RedeemCodeMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *RedeemCodeMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ClearGroupID clears the value of the "group_id" field. +func (m *RedeemCodeMutation) ClearGroupID() { + m.group = nil + m.clearedFields[redeemcode.FieldGroupID] = struct{}{} +} + +// GroupIDCleared returns if the "group_id" field was cleared in this mutation. 
+func (m *RedeemCodeMutation) GroupIDCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldGroupID] + return ok +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *RedeemCodeMutation) ResetGroupID() { + m.group = nil + delete(m.clearedFields, redeemcode.FieldGroupID) +} + +// SetValidityDays sets the "validity_days" field. +func (m *RedeemCodeMutation) SetValidityDays(i int) { + m.validity_days = &i + m.addvalidity_days = nil +} + +// ValidityDays returns the value of the "validity_days" field in the mutation. +func (m *RedeemCodeMutation) ValidityDays() (r int, exists bool) { + v := m.validity_days + if v == nil { + return + } + return *v, true +} + +// OldValidityDays returns the old "validity_days" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldValidityDays(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValidityDays is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValidityDays requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValidityDays: %w", err) + } + return oldValue.ValidityDays, nil +} + +// AddValidityDays adds i to the "validity_days" field. +func (m *RedeemCodeMutation) AddValidityDays(i int) { + if m.addvalidity_days != nil { + *m.addvalidity_days += i + } else { + m.addvalidity_days = &i + } +} + +// AddedValidityDays returns the value that was added to the "validity_days" field in this mutation. 
+func (m *RedeemCodeMutation) AddedValidityDays() (r int, exists bool) { + v := m.addvalidity_days + if v == nil { + return + } + return *v, true +} + +// ResetValidityDays resets all changes to the "validity_days" field. +func (m *RedeemCodeMutation) ResetValidityDays() { + m.validity_days = nil + m.addvalidity_days = nil +} + +// SetUserID sets the "user" edge to the User entity by id. +func (m *RedeemCodeMutation) SetUserID(id int64) { + m.user = &id +} + +// ClearUser clears the "user" edge to the User entity. +func (m *RedeemCodeMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[redeemcode.FieldUsedBy] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *RedeemCodeMutation) UserCleared() bool { + return m.UsedByCleared() || m.cleareduser +} + +// UserID returns the "user" edge ID in the mutation. +func (m *RedeemCodeMutation) UserID() (id int64, exists bool) { + if m.user != nil { + return *m.user, true + } + return +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *RedeemCodeMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *RedeemCodeMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *RedeemCodeMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[redeemcode.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *RedeemCodeMutation) GroupCleared() bool { + return m.GroupIDCleared() || m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. 
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *RedeemCodeMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *RedeemCodeMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// Where appends a list predicates to the RedeemCodeMutation builder. +func (m *RedeemCodeMutation) Where(ps ...predicate.RedeemCode) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the RedeemCodeMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *RedeemCodeMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.RedeemCode, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *RedeemCodeMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *RedeemCodeMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (RedeemCode). +func (m *RedeemCodeMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *RedeemCodeMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.code != nil { + fields = append(fields, redeemcode.FieldCode) + } + if m._type != nil { + fields = append(fields, redeemcode.FieldType) + } + if m.value != nil { + fields = append(fields, redeemcode.FieldValue) + } + if m.status != nil { + fields = append(fields, redeemcode.FieldStatus) + } + if m.user != nil { + fields = append(fields, redeemcode.FieldUsedBy) + } + if m.used_at != nil { + fields = append(fields, redeemcode.FieldUsedAt) + } + if m.notes != nil { + fields = append(fields, redeemcode.FieldNotes) + } + if m.created_at != nil { + fields = append(fields, redeemcode.FieldCreatedAt) + } + if m.group != nil { + fields = append(fields, redeemcode.FieldGroupID) + } + if m.validity_days != nil { + fields = append(fields, redeemcode.FieldValidityDays) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *RedeemCodeMutation) Field(name string) (ent.Value, bool) { + switch name { + case redeemcode.FieldCode: + return m.Code() + case redeemcode.FieldType: + return m.GetType() + case redeemcode.FieldValue: + return m.Value() + case redeemcode.FieldStatus: + return m.Status() + case redeemcode.FieldUsedBy: + return m.UsedBy() + case redeemcode.FieldUsedAt: + return m.UsedAt() + case redeemcode.FieldNotes: + return m.Notes() + case redeemcode.FieldCreatedAt: + return m.CreatedAt() + case redeemcode.FieldGroupID: + return m.GroupID() + case redeemcode.FieldValidityDays: + return m.ValidityDays() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *RedeemCodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case redeemcode.FieldCode: + return m.OldCode(ctx) + case redeemcode.FieldType: + return m.OldType(ctx) + case redeemcode.FieldValue: + return m.OldValue(ctx) + case redeemcode.FieldStatus: + return m.OldStatus(ctx) + case redeemcode.FieldUsedBy: + return m.OldUsedBy(ctx) + case redeemcode.FieldUsedAt: + return m.OldUsedAt(ctx) + case redeemcode.FieldNotes: + return m.OldNotes(ctx) + case redeemcode.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case redeemcode.FieldGroupID: + return m.OldGroupID(ctx) + case redeemcode.FieldValidityDays: + return m.OldValidityDays(ctx) + } + return nil, fmt.Errorf("unknown RedeemCode field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RedeemCodeMutation) SetField(name string, value ent.Value) error { + switch name { + case redeemcode.FieldCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCode(v) + return nil + case redeemcode.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case redeemcode.FieldValue: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case redeemcode.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case redeemcode.FieldUsedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedBy(v) + return nil + case redeemcode.FieldUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, 
name) + } + m.SetUsedAt(v) + return nil + case redeemcode.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + case redeemcode.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case redeemcode.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case redeemcode.FieldValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValidityDays(v) + return nil + } + return fmt.Errorf("unknown RedeemCode field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *RedeemCodeMutation) AddedFields() []string { + var fields []string + if m.addvalue != nil { + fields = append(fields, redeemcode.FieldValue) + } + if m.addvalidity_days != nil { + fields = append(fields, redeemcode.FieldValidityDays) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *RedeemCodeMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case redeemcode.FieldValue: + return m.AddedValue() + case redeemcode.FieldValidityDays: + return m.AddedValidityDays() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *RedeemCodeMutation) AddField(name string, value ent.Value) error { + switch name { + case redeemcode.FieldValue: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddValue(v) + return nil + case redeemcode.FieldValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddValidityDays(v) + return nil + } + return fmt.Errorf("unknown RedeemCode numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *RedeemCodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(redeemcode.FieldUsedBy) { + fields = append(fields, redeemcode.FieldUsedBy) + } + if m.FieldCleared(redeemcode.FieldUsedAt) { + fields = append(fields, redeemcode.FieldUsedAt) + } + if m.FieldCleared(redeemcode.FieldNotes) { + fields = append(fields, redeemcode.FieldNotes) + } + if m.FieldCleared(redeemcode.FieldGroupID) { + fields = append(fields, redeemcode.FieldGroupID) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *RedeemCodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *RedeemCodeMutation) ClearField(name string) error { + switch name { + case redeemcode.FieldUsedBy: + m.ClearUsedBy() + return nil + case redeemcode.FieldUsedAt: + m.ClearUsedAt() + return nil + case redeemcode.FieldNotes: + m.ClearNotes() + return nil + case redeemcode.FieldGroupID: + m.ClearGroupID() + return nil + } + return fmt.Errorf("unknown RedeemCode nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. 
+// It returns an error if the field is not defined in the schema. +func (m *RedeemCodeMutation) ResetField(name string) error { + switch name { + case redeemcode.FieldCode: + m.ResetCode() + return nil + case redeemcode.FieldType: + m.ResetType() + return nil + case redeemcode.FieldValue: + m.ResetValue() + return nil + case redeemcode.FieldStatus: + m.ResetStatus() + return nil + case redeemcode.FieldUsedBy: + m.ResetUsedBy() + return nil + case redeemcode.FieldUsedAt: + m.ResetUsedAt() + return nil + case redeemcode.FieldNotes: + m.ResetNotes() + return nil + case redeemcode.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case redeemcode.FieldGroupID: + m.ResetGroupID() + return nil + case redeemcode.FieldValidityDays: + m.ResetValidityDays() + return nil + } + return fmt.Errorf("unknown RedeemCode field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *RedeemCodeMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, redeemcode.EdgeUser) + } + if m.group != nil { + edges = append(edges, redeemcode.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *RedeemCodeMutation) AddedIDs(name string) []ent.Value { + switch name { + case redeemcode.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case redeemcode.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *RedeemCodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *RedeemCodeMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *RedeemCodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, redeemcode.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, redeemcode.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *RedeemCodeMutation) EdgeCleared(name string) bool { + switch name { + case redeemcode.EdgeUser: + return m.cleareduser + case redeemcode.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *RedeemCodeMutation) ClearEdge(name string) error { + switch name { + case redeemcode.EdgeUser: + m.ClearUser() + return nil + case redeemcode.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown RedeemCode unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *RedeemCodeMutation) ResetEdge(name string) error { + switch name { + case redeemcode.EdgeUser: + m.ResetUser() + return nil + case redeemcode.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown RedeemCode edge %s", name) +} + +// SettingMutation represents an operation that mutates the Setting nodes in the graph. 
+type SettingMutation struct { + config + op Op + typ string + id *int64 + key *string + value *string + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Setting, error) + predicates []predicate.Setting +} + +var _ ent.Mutation = (*SettingMutation)(nil) + +// settingOption allows management of the mutation configuration using functional options. +type settingOption func(*SettingMutation) + +// newSettingMutation creates new mutation for the Setting entity. +func newSettingMutation(c config, op Op, opts ...settingOption) *SettingMutation { + m := &SettingMutation{ + config: c, + op: op, + typ: TypeSetting, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withSettingID sets the ID field of the mutation. +func withSettingID(id int64) settingOption { + return func(m *SettingMutation) { + var ( + err error + once sync.Once + value *Setting + ) + m.oldValue = func(ctx context.Context) (*Setting, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Setting.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withSetting sets the old Setting of the mutation. +func withSetting(node *Setting) settingOption { + return func(m *SettingMutation) { + m.oldValue = func(context.Context) (*Setting, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m SettingMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m SettingMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *SettingMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *SettingMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Setting.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetKey sets the "key" field. +func (m *SettingMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the value of the "key" field in the mutation. +func (m *SettingMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old "key" field's value of the Setting entity. +// If the Setting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *SettingMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey resets all changes to the "key" field. +func (m *SettingMutation) ResetKey() { + m.key = nil +} + +// SetValue sets the "value" field. +func (m *SettingMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *SettingMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the Setting entity. +// If the Setting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SettingMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *SettingMutation) ResetValue() { + m.value = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *SettingMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. 
+func (m *SettingMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Setting entity. +// If the Setting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SettingMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *SettingMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// Where appends a list predicates to the SettingMutation builder. +func (m *SettingMutation) Where(ps ...predicate.Setting) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the SettingMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *SettingMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Setting, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *SettingMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *SettingMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Setting). +func (m *SettingMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. 
Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *SettingMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.key != nil { + fields = append(fields, setting.FieldKey) + } + if m.value != nil { + fields = append(fields, setting.FieldValue) + } + if m.updated_at != nil { + fields = append(fields, setting.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *SettingMutation) Field(name string) (ent.Value, bool) { + switch name { + case setting.FieldKey: + return m.Key() + case setting.FieldValue: + return m.Value() + case setting.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *SettingMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case setting.FieldKey: + return m.OldKey(ctx) + case setting.FieldValue: + return m.OldValue(ctx) + case setting.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Setting field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *SettingMutation) SetField(name string, value ent.Value) error { + switch name { + case setting.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case setting.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case setting.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Setting field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *SettingMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *SettingMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *SettingMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Setting numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *SettingMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *SettingMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *SettingMutation) ClearField(name string) error { + return fmt.Errorf("unknown Setting nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *SettingMutation) ResetField(name string) error { + switch name { + case setting.FieldKey: + m.ResetKey() + return nil + case setting.FieldValue: + m.ResetValue() + return nil + case setting.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Setting field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *SettingMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *SettingMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *SettingMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *SettingMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *SettingMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *SettingMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *SettingMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Setting unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *SettingMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Setting edge %s", name) +} + +// UserMutation represents an operation that mutates the User nodes in the graph. +type UserMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + email *string + password_hash *string + role *string + balance *float64 + addbalance *float64 + concurrency *int + addconcurrency *int + status *string + username *string + wechat *string + notes *string + clearedFields map[string]struct{} + api_keys map[int64]struct{} + removedapi_keys map[int64]struct{} + clearedapi_keys bool + redeem_codes map[int64]struct{} + removedredeem_codes map[int64]struct{} + clearedredeem_codes bool + subscriptions map[int64]struct{} + removedsubscriptions map[int64]struct{} + clearedsubscriptions bool + assigned_subscriptions map[int64]struct{} + removedassigned_subscriptions map[int64]struct{} + clearedassigned_subscriptions bool + allowed_groups map[int64]struct{} + removedallowed_groups map[int64]struct{} + clearedallowed_groups bool + done bool + oldValue func(context.Context) (*User, error) + predicates []predicate.User +} + +var _ ent.Mutation = (*UserMutation)(nil) + +// userOption allows management of the mutation configuration using functional options. +type userOption func(*UserMutation) + +// newUserMutation creates new mutation for the User entity. 
+func newUserMutation(c config, op Op, opts ...userOption) *UserMutation { + m := &UserMutation{ + config: c, + op: op, + typ: TypeUser, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserID sets the ID field of the mutation. +func withUserID(id int64) userOption { + return func(m *UserMutation) { + var ( + err error + once sync.Once + value *User + ) + m.oldValue = func(ctx context.Context) (*User, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().User.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUser sets the old User of the mutation. +func withUser(node *User) userOption { + return func(m *UserMutation) { + m.oldValue = func(context.Context) (*User, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UserMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().User.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (m *UserMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *UserMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *UserMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *UserMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[user.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *UserMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[user.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *UserMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, user.FieldDeletedAt) +} + +// SetEmail sets the "email" field. +func (m *UserMutation) SetEmail(s string) { + m.email = &s +} + +// Email returns the value of the "email" field in the mutation. +func (m *UserMutation) Email() (r string, exists bool) { + v := m.email + if v == nil { + return + } + return *v, true +} + +// OldEmail returns the old "email" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmail: %w", err) + } + return oldValue.Email, nil +} + +// ResetEmail resets all changes to the "email" field. +func (m *UserMutation) ResetEmail() { + m.email = nil +} + +// SetPasswordHash sets the "password_hash" field. +func (m *UserMutation) SetPasswordHash(s string) { + m.password_hash = &s +} + +// PasswordHash returns the value of the "password_hash" field in the mutation. +func (m *UserMutation) PasswordHash() (r string, exists bool) { + v := m.password_hash + if v == nil { + return + } + return *v, true +} + +// OldPasswordHash returns the old "password_hash" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldPasswordHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPasswordHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPasswordHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPasswordHash: %w", err) + } + return oldValue.PasswordHash, nil +} + +// ResetPasswordHash resets all changes to the "password_hash" field. +func (m *UserMutation) ResetPasswordHash() { + m.password_hash = nil +} + +// SetRole sets the "role" field. 
+func (m *UserMutation) SetRole(s string) { + m.role = &s +} + +// Role returns the value of the "role" field in the mutation. +func (m *UserMutation) Role() (r string, exists bool) { + v := m.role + if v == nil { + return + } + return *v, true +} + +// OldRole returns the old "role" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldRole(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRole is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRole requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRole: %w", err) + } + return oldValue.Role, nil +} + +// ResetRole resets all changes to the "role" field. +func (m *UserMutation) ResetRole() { + m.role = nil +} + +// SetBalance sets the "balance" field. +func (m *UserMutation) SetBalance(f float64) { + m.balance = &f + m.addbalance = nil +} + +// Balance returns the value of the "balance" field in the mutation. +func (m *UserMutation) Balance() (r float64, exists bool) { + v := m.balance + if v == nil { + return + } + return *v, true +} + +// OldBalance returns the old "balance" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldBalance(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBalance is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBalance requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBalance: %w", err) + } + return oldValue.Balance, nil +} + +// AddBalance adds f to the "balance" field. +func (m *UserMutation) AddBalance(f float64) { + if m.addbalance != nil { + *m.addbalance += f + } else { + m.addbalance = &f + } +} + +// AddedBalance returns the value that was added to the "balance" field in this mutation. +func (m *UserMutation) AddedBalance() (r float64, exists bool) { + v := m.addbalance + if v == nil { + return + } + return *v, true +} + +// ResetBalance resets all changes to the "balance" field. +func (m *UserMutation) ResetBalance() { + m.balance = nil + m.addbalance = nil +} + +// SetConcurrency sets the "concurrency" field. +func (m *UserMutation) SetConcurrency(i int) { + m.concurrency = &i + m.addconcurrency = nil +} + +// Concurrency returns the value of the "concurrency" field in the mutation. +func (m *UserMutation) Concurrency() (r int, exists bool) { + v := m.concurrency + if v == nil { + return + } + return *v, true +} + +// OldConcurrency returns the old "concurrency" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldConcurrency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConcurrency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConcurrency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConcurrency: %w", err) + } + return oldValue.Concurrency, nil +} + +// AddConcurrency adds i to the "concurrency" field. +func (m *UserMutation) AddConcurrency(i int) { + if m.addconcurrency != nil { + *m.addconcurrency += i + } else { + m.addconcurrency = &i + } +} + +// AddedConcurrency returns the value that was added to the "concurrency" field in this mutation. +func (m *UserMutation) AddedConcurrency() (r int, exists bool) { + v := m.addconcurrency + if v == nil { + return + } + return *v, true +} + +// ResetConcurrency resets all changes to the "concurrency" field. +func (m *UserMutation) ResetConcurrency() { + m.concurrency = nil + m.addconcurrency = nil +} + +// SetStatus sets the "status" field. +func (m *UserMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UserMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UserMutation) ResetStatus() { + m.status = nil +} + +// SetUsername sets the "username" field. +func (m *UserMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *UserMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ResetUsername resets all changes to the "username" field. +func (m *UserMutation) ResetUsername() { + m.username = nil +} + +// SetWechat sets the "wechat" field. +func (m *UserMutation) SetWechat(s string) { + m.wechat = &s +} + +// Wechat returns the value of the "wechat" field in the mutation. 
+func (m *UserMutation) Wechat() (r string, exists bool) { + v := m.wechat + if v == nil { + return + } + return *v, true +} + +// OldWechat returns the old "wechat" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldWechat(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWechat is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWechat requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWechat: %w", err) + } + return oldValue.Wechat, nil +} + +// ResetWechat resets all changes to the "wechat" field. +func (m *UserMutation) ResetWechat() { + m.wechat = nil +} + +// SetNotes sets the "notes" field. +func (m *UserMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *UserMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldNotes(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ResetNotes resets all changes to the "notes" field. +func (m *UserMutation) ResetNotes() { + m.notes = nil +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by ids. +func (m *UserMutation) AddAPIKeyIDs(ids ...int64) { + if m.api_keys == nil { + m.api_keys = make(map[int64]struct{}) + } + for i := range ids { + m.api_keys[ids[i]] = struct{}{} + } +} + +// ClearAPIKeys clears the "api_keys" edge to the ApiKey entity. +func (m *UserMutation) ClearAPIKeys() { + m.clearedapi_keys = true +} + +// APIKeysCleared reports if the "api_keys" edge to the ApiKey entity was cleared. +func (m *UserMutation) APIKeysCleared() bool { + return m.clearedapi_keys +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to the ApiKey entity by IDs. +func (m *UserMutation) RemoveAPIKeyIDs(ids ...int64) { + if m.removedapi_keys == nil { + m.removedapi_keys = make(map[int64]struct{}) + } + for i := range ids { + delete(m.api_keys, ids[i]) + m.removedapi_keys[ids[i]] = struct{}{} + } +} + +// RemovedAPIKeys returns the removed IDs of the "api_keys" edge to the ApiKey entity. +func (m *UserMutation) RemovedAPIKeysIDs() (ids []int64) { + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return +} + +// APIKeysIDs returns the "api_keys" edge IDs in the mutation. +func (m *UserMutation) APIKeysIDs() (ids []int64) { + for id := range m.api_keys { + ids = append(ids, id) + } + return +} + +// ResetAPIKeys resets all changes to the "api_keys" edge. 
+func (m *UserMutation) ResetAPIKeys() { + m.api_keys = nil + m.clearedapi_keys = false + m.removedapi_keys = nil +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by ids. +func (m *UserMutation) AddRedeemCodeIDs(ids ...int64) { + if m.redeem_codes == nil { + m.redeem_codes = make(map[int64]struct{}) + } + for i := range ids { + m.redeem_codes[ids[i]] = struct{}{} + } +} + +// ClearRedeemCodes clears the "redeem_codes" edge to the RedeemCode entity. +func (m *UserMutation) ClearRedeemCodes() { + m.clearedredeem_codes = true +} + +// RedeemCodesCleared reports if the "redeem_codes" edge to the RedeemCode entity was cleared. +func (m *UserMutation) RedeemCodesCleared() bool { + return m.clearedredeem_codes +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to the RedeemCode entity by IDs. +func (m *UserMutation) RemoveRedeemCodeIDs(ids ...int64) { + if m.removedredeem_codes == nil { + m.removedredeem_codes = make(map[int64]struct{}) + } + for i := range ids { + delete(m.redeem_codes, ids[i]) + m.removedredeem_codes[ids[i]] = struct{}{} + } +} + +// RemovedRedeemCodes returns the removed IDs of the "redeem_codes" edge to the RedeemCode entity. +func (m *UserMutation) RemovedRedeemCodesIDs() (ids []int64) { + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return +} + +// RedeemCodesIDs returns the "redeem_codes" edge IDs in the mutation. +func (m *UserMutation) RedeemCodesIDs() (ids []int64) { + for id := range m.redeem_codes { + ids = append(ids, id) + } + return +} + +// ResetRedeemCodes resets all changes to the "redeem_codes" edge. +func (m *UserMutation) ResetRedeemCodes() { + m.redeem_codes = nil + m.clearedredeem_codes = false + m.removedredeem_codes = nil +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by ids. 
+func (m *UserMutation) AddSubscriptionIDs(ids ...int64) { + if m.subscriptions == nil { + m.subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.subscriptions[ids[i]] = struct{}{} + } +} + +// ClearSubscriptions clears the "subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) ClearSubscriptions() { + m.clearedsubscriptions = true +} + +// SubscriptionsCleared reports if the "subscriptions" edge to the UserSubscription entity was cleared. +func (m *UserMutation) SubscriptionsCleared() bool { + return m.clearedsubscriptions +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to the UserSubscription entity by IDs. +func (m *UserMutation) RemoveSubscriptionIDs(ids ...int64) { + if m.removedsubscriptions == nil { + m.removedsubscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.subscriptions, ids[i]) + m.removedsubscriptions[ids[i]] = struct{}{} + } +} + +// RemovedSubscriptions returns the removed IDs of the "subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) RemovedSubscriptionsIDs() (ids []int64) { + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return +} + +// SubscriptionsIDs returns the "subscriptions" edge IDs in the mutation. +func (m *UserMutation) SubscriptionsIDs() (ids []int64) { + for id := range m.subscriptions { + ids = append(ids, id) + } + return +} + +// ResetSubscriptions resets all changes to the "subscriptions" edge. +func (m *UserMutation) ResetSubscriptions() { + m.subscriptions = nil + m.clearedsubscriptions = false + m.removedsubscriptions = nil +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by ids. 
+func (m *UserMutation) AddAssignedSubscriptionIDs(ids ...int64) { + if m.assigned_subscriptions == nil { + m.assigned_subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.assigned_subscriptions[ids[i]] = struct{}{} + } +} + +// ClearAssignedSubscriptions clears the "assigned_subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) ClearAssignedSubscriptions() { + m.clearedassigned_subscriptions = true +} + +// AssignedSubscriptionsCleared reports if the "assigned_subscriptions" edge to the UserSubscription entity was cleared. +func (m *UserMutation) AssignedSubscriptionsCleared() bool { + return m.clearedassigned_subscriptions +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (m *UserMutation) RemoveAssignedSubscriptionIDs(ids ...int64) { + if m.removedassigned_subscriptions == nil { + m.removedassigned_subscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.assigned_subscriptions, ids[i]) + m.removedassigned_subscriptions[ids[i]] = struct{}{} + } +} + +// RemovedAssignedSubscriptions returns the removed IDs of the "assigned_subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) RemovedAssignedSubscriptionsIDs() (ids []int64) { + for id := range m.removedassigned_subscriptions { + ids = append(ids, id) + } + return +} + +// AssignedSubscriptionsIDs returns the "assigned_subscriptions" edge IDs in the mutation. +func (m *UserMutation) AssignedSubscriptionsIDs() (ids []int64) { + for id := range m.assigned_subscriptions { + ids = append(ids, id) + } + return +} + +// ResetAssignedSubscriptions resets all changes to the "assigned_subscriptions" edge. +func (m *UserMutation) ResetAssignedSubscriptions() { + m.assigned_subscriptions = nil + m.clearedassigned_subscriptions = false + m.removedassigned_subscriptions = nil +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by ids. 
+func (m *UserMutation) AddAllowedGroupIDs(ids ...int64) { + if m.allowed_groups == nil { + m.allowed_groups = make(map[int64]struct{}) + } + for i := range ids { + m.allowed_groups[ids[i]] = struct{}{} + } +} + +// ClearAllowedGroups clears the "allowed_groups" edge to the Group entity. +func (m *UserMutation) ClearAllowedGroups() { + m.clearedallowed_groups = true +} + +// AllowedGroupsCleared reports if the "allowed_groups" edge to the Group entity was cleared. +func (m *UserMutation) AllowedGroupsCleared() bool { + return m.clearedallowed_groups +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to the Group entity by IDs. +func (m *UserMutation) RemoveAllowedGroupIDs(ids ...int64) { + if m.removedallowed_groups == nil { + m.removedallowed_groups = make(map[int64]struct{}) + } + for i := range ids { + delete(m.allowed_groups, ids[i]) + m.removedallowed_groups[ids[i]] = struct{}{} + } +} + +// RemovedAllowedGroups returns the removed IDs of the "allowed_groups" edge to the Group entity. +func (m *UserMutation) RemovedAllowedGroupsIDs() (ids []int64) { + for id := range m.removedallowed_groups { + ids = append(ids, id) + } + return +} + +// AllowedGroupsIDs returns the "allowed_groups" edge IDs in the mutation. +func (m *UserMutation) AllowedGroupsIDs() (ids []int64) { + for id := range m.allowed_groups { + ids = append(ids, id) + } + return +} + +// ResetAllowedGroups resets all changes to the "allowed_groups" edge. +func (m *UserMutation) ResetAllowedGroups() { + m.allowed_groups = nil + m.clearedallowed_groups = false + m.removedallowed_groups = nil +} + +// Where appends a list predicates to the UserMutation builder. +func (m *UserMutation) Where(ps ...predicate.User) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. 
+func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.User, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (User). +func (m *UserMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserMutation) Fields() []string { + fields := make([]string, 0, 12) + if m.created_at != nil { + fields = append(fields, user.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, user.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, user.FieldDeletedAt) + } + if m.email != nil { + fields = append(fields, user.FieldEmail) + } + if m.password_hash != nil { + fields = append(fields, user.FieldPasswordHash) + } + if m.role != nil { + fields = append(fields, user.FieldRole) + } + if m.balance != nil { + fields = append(fields, user.FieldBalance) + } + if m.concurrency != nil { + fields = append(fields, user.FieldConcurrency) + } + if m.status != nil { + fields = append(fields, user.FieldStatus) + } + if m.username != nil { + fields = append(fields, user.FieldUsername) + } + if m.wechat != nil { + fields = append(fields, user.FieldWechat) + } + if m.notes != nil { + fields = append(fields, user.FieldNotes) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *UserMutation) Field(name string) (ent.Value, bool) { + switch name { + case user.FieldCreatedAt: + return m.CreatedAt() + case user.FieldUpdatedAt: + return m.UpdatedAt() + case user.FieldDeletedAt: + return m.DeletedAt() + case user.FieldEmail: + return m.Email() + case user.FieldPasswordHash: + return m.PasswordHash() + case user.FieldRole: + return m.Role() + case user.FieldBalance: + return m.Balance() + case user.FieldConcurrency: + return m.Concurrency() + case user.FieldStatus: + return m.Status() + case user.FieldUsername: + return m.Username() + case user.FieldWechat: + return m.Wechat() + case user.FieldNotes: + return m.Notes() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case user.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case user.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case user.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case user.FieldEmail: + return m.OldEmail(ctx) + case user.FieldPasswordHash: + return m.OldPasswordHash(ctx) + case user.FieldRole: + return m.OldRole(ctx) + case user.FieldBalance: + return m.OldBalance(ctx) + case user.FieldConcurrency: + return m.OldConcurrency(ctx) + case user.FieldStatus: + return m.OldStatus(ctx) + case user.FieldUsername: + return m.OldUsername(ctx) + case user.FieldWechat: + return m.OldWechat(ctx) + case user.FieldNotes: + return m.OldNotes(ctx) + } + return nil, fmt.Errorf("unknown User field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case user.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case user.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case user.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case user.FieldPasswordHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPasswordHash(v) + return nil + case user.FieldRole: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRole(v) + return nil + case user.FieldBalance: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBalance(v) + return nil + case user.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConcurrency(v) + return nil + case user.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case user.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case user.FieldWechat: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWechat(v) + return nil + case user.FieldNotes: + v, ok := value.(string) + if !ok { 
+ return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + var fields []string + if m.addbalance != nil { + fields = append(fields, user.FieldBalance) + } + if m.addconcurrency != nil { + fields = append(fields, user.FieldConcurrency) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case user.FieldBalance: + return m.AddedBalance() + case user.FieldConcurrency: + return m.AddedConcurrency() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + case user.FieldBalance: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBalance(v) + return nil + case user.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddConcurrency(v) + return nil + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *UserMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(user.FieldDeletedAt) { + fields = append(fields, user.FieldDeletedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + switch name { + case user.FieldDeletedAt: + m.ClearDeletedAt() + return nil + } + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case user.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case user.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case user.FieldEmail: + m.ResetEmail() + return nil + case user.FieldPasswordHash: + m.ResetPasswordHash() + return nil + case user.FieldRole: + m.ResetRole() + return nil + case user.FieldBalance: + m.ResetBalance() + return nil + case user.FieldConcurrency: + m.ResetConcurrency() + return nil + case user.FieldStatus: + m.ResetStatus() + return nil + case user.FieldUsername: + m.ResetUsername() + return nil + case user.FieldWechat: + m.ResetWechat() + return nil + case user.FieldNotes: + m.ResetNotes() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 5) + if m.api_keys != nil { + edges = append(edges, user.EdgeAPIKeys) + } + if m.redeem_codes != nil { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.subscriptions != nil { + edges = append(edges, user.EdgeSubscriptions) + } + if m.assigned_subscriptions != nil { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.allowed_groups != nil { + edges = append(edges, user.EdgeAllowedGroups) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserMutation) AddedIDs(name string) []ent.Value { + switch name { + case user.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.api_keys)) + for id := range m.api_keys { + ids = append(ids, id) + } + return ids + case user.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.redeem_codes)) + for id := range m.redeem_codes { + ids = append(ids, id) + } + return ids + case user.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.subscriptions)) + for id := range m.subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAssignedSubscriptions: + ids := make([]ent.Value, 0, len(m.assigned_subscriptions)) + for id := range m.assigned_subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAllowedGroups: + ids := make([]ent.Value, 0, len(m.allowed_groups)) + for id := range m.allowed_groups { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 5) + if m.removedapi_keys != nil { + edges = append(edges, user.EdgeAPIKeys) + } + if m.removedredeem_codes != nil { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.removedsubscriptions != nil { + edges = append(edges, user.EdgeSubscriptions) + } + if m.removedassigned_subscriptions != nil { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.removedallowed_groups != nil { + edges = append(edges, user.EdgeAllowedGroups) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserMutation) RemovedIDs(name string) []ent.Value { + switch name { + case user.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.removedapi_keys)) + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return ids + case user.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.removedredeem_codes)) + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return ids + case user.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.removedsubscriptions)) + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAssignedSubscriptions: + ids := make([]ent.Value, 0, len(m.removedassigned_subscriptions)) + for id := range m.removedassigned_subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAllowedGroups: + ids := make([]ent.Value, 0, len(m.removedallowed_groups)) + for id := range m.removedallowed_groups { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 5) + if m.clearedapi_keys { + edges = append(edges, user.EdgeAPIKeys) + } + if m.clearedredeem_codes { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.clearedsubscriptions { + edges = append(edges, user.EdgeSubscriptions) + } + if m.clearedassigned_subscriptions { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.clearedallowed_groups { + edges = append(edges, user.EdgeAllowedGroups) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + switch name { + case user.EdgeAPIKeys: + return m.clearedapi_keys + case user.EdgeRedeemCodes: + return m.clearedredeem_codes + case user.EdgeSubscriptions: + return m.clearedsubscriptions + case user.EdgeAssignedSubscriptions: + return m.clearedassigned_subscriptions + case user.EdgeAllowedGroups: + return m.clearedallowed_groups + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *UserMutation) ResetEdge(name string) error { + switch name { + case user.EdgeAPIKeys: + m.ResetAPIKeys() + return nil + case user.EdgeRedeemCodes: + m.ResetRedeemCodes() + return nil + case user.EdgeSubscriptions: + m.ResetSubscriptions() + return nil + case user.EdgeAssignedSubscriptions: + m.ResetAssignedSubscriptions() + return nil + case user.EdgeAllowedGroups: + m.ResetAllowedGroups() + return nil + } + return fmt.Errorf("unknown User edge %s", name) +} + +// UserAllowedGroupMutation represents an operation that mutates the UserAllowedGroup nodes in the graph. +type UserAllowedGroupMutation struct { + config + op Op + typ string + created_at *time.Time + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*UserAllowedGroup, error) + predicates []predicate.UserAllowedGroup +} + +var _ ent.Mutation = (*UserAllowedGroupMutation)(nil) + +// userallowedgroupOption allows management of the mutation configuration using functional options. +type userallowedgroupOption func(*UserAllowedGroupMutation) + +// newUserAllowedGroupMutation creates new mutation for the UserAllowedGroup entity. +func newUserAllowedGroupMutation(c config, op Op, opts ...userallowedgroupOption) *UserAllowedGroupMutation { + m := &UserAllowedGroupMutation{ + config: c, + op: op, + typ: TypeUserAllowedGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserAllowedGroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m UserAllowedGroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetUserID sets the "user_id" field. +func (m *UserAllowedGroupMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserAllowedGroupMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserAllowedGroupMutation) ResetUserID() { + m.user = nil +} + +// SetGroupID sets the "group_id" field. +func (m *UserAllowedGroupMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *UserAllowedGroupMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *UserAllowedGroupMutation) ResetGroupID() { + m.group = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserAllowedGroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserAllowedGroupMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserAllowedGroupMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UserAllowedGroupMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[userallowedgroup.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. 
+func (m *UserAllowedGroupMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UserAllowedGroupMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UserAllowedGroupMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *UserAllowedGroupMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[userallowedgroup.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *UserAllowedGroupMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *UserAllowedGroupMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *UserAllowedGroupMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// Where appends a list predicates to the UserAllowedGroupMutation builder. +func (m *UserAllowedGroupMutation) Where(ps ...predicate.UserAllowedGroup) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserAllowedGroupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. 
+func (m *UserAllowedGroupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UserAllowedGroup, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserAllowedGroupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserAllowedGroupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UserAllowedGroup). +func (m *UserAllowedGroupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserAllowedGroupMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.user != nil { + fields = append(fields, userallowedgroup.FieldUserID) + } + if m.group != nil { + fields = append(fields, userallowedgroup.FieldGroupID) + } + if m.created_at != nil { + fields = append(fields, userallowedgroup.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserAllowedGroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case userallowedgroup.FieldUserID: + return m.UserID() + case userallowedgroup.FieldGroupID: + return m.GroupID() + case userallowedgroup.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAllowedGroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + return nil, errors.New("edge schema UserAllowedGroup does not support getting old values") +} + +// SetField sets the value of a field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAllowedGroupMutation) SetField(name string, value ent.Value) error { + switch name { + case userallowedgroup.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case userallowedgroup.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case userallowedgroup.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown UserAllowedGroup field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAllowedGroupMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAllowedGroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAllowedGroupMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown UserAllowedGroup numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *UserAllowedGroupMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAllowedGroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserAllowedGroupMutation) ClearField(name string) error { + return fmt.Errorf("unknown UserAllowedGroup nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAllowedGroupMutation) ResetField(name string) error { + switch name { + case userallowedgroup.FieldUserID: + m.ResetUserID() + return nil + case userallowedgroup.FieldGroupID: + m.ResetGroupID() + return nil + case userallowedgroup.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAllowedGroupMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, userallowedgroup.EdgeUser) + } + if m.group != nil { + edges = append(edges, userallowedgroup.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAllowedGroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case userallowedgroup.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case userallowedgroup.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *UserAllowedGroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAllowedGroupMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAllowedGroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, userallowedgroup.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, userallowedgroup.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAllowedGroupMutation) EdgeCleared(name string) bool { + switch name { + case userallowedgroup.EdgeUser: + return m.cleareduser + case userallowedgroup.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAllowedGroupMutation) ClearEdge(name string) error { + switch name { + case userallowedgroup.EdgeUser: + m.ClearUser() + return nil + case userallowedgroup.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserAllowedGroupMutation) ResetEdge(name string) error { + switch name { + case userallowedgroup.EdgeUser: + m.ResetUser() + return nil + case userallowedgroup.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup edge %s", name) +} + +// UserSubscriptionMutation represents an operation that mutates the UserSubscription nodes in the graph. 
+type UserSubscriptionMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + starts_at *time.Time + expires_at *time.Time + status *string + daily_window_start *time.Time + weekly_window_start *time.Time + monthly_window_start *time.Time + daily_usage_usd *float64 + adddaily_usage_usd *float64 + weekly_usage_usd *float64 + addweekly_usage_usd *float64 + monthly_usage_usd *float64 + addmonthly_usage_usd *float64 + assigned_at *time.Time + notes *string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + assigned_by_user *int64 + clearedassigned_by_user bool + done bool + oldValue func(context.Context) (*UserSubscription, error) + predicates []predicate.UserSubscription +} + +var _ ent.Mutation = (*UserSubscriptionMutation)(nil) + +// usersubscriptionOption allows management of the mutation configuration using functional options. +type usersubscriptionOption func(*UserSubscriptionMutation) + +// newUserSubscriptionMutation creates new mutation for the UserSubscription entity. +func newUserSubscriptionMutation(c config, op Op, opts ...usersubscriptionOption) *UserSubscriptionMutation { + m := &UserSubscriptionMutation{ + config: c, + op: op, + typ: TypeUserSubscription, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserSubscriptionID sets the ID field of the mutation. 
+func withUserSubscriptionID(id int64) usersubscriptionOption { + return func(m *UserSubscriptionMutation) { + var ( + err error + once sync.Once + value *UserSubscription + ) + m.oldValue = func(ctx context.Context) (*UserSubscription, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UserSubscription.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUserSubscription sets the old UserSubscription of the mutation. +func withUserSubscription(node *UserSubscription) usersubscriptionOption { + return func(m *UserSubscriptionMutation) { + m.oldValue = func(context.Context) (*UserSubscription, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserSubscriptionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserSubscriptionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserSubscriptionMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *UserSubscriptionMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().UserSubscription.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserSubscriptionMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserSubscriptionMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserSubscriptionMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (m *UserSubscriptionMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserSubscriptionMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserSubscriptionMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetUserID sets the "user_id" field. +func (m *UserSubscriptionMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserSubscriptionMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserSubscriptionMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserSubscriptionMutation) ResetUserID() { + m.user = nil +} + +// SetGroupID sets the "group_id" field. +func (m *UserSubscriptionMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *UserSubscriptionMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldGroupID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *UserSubscriptionMutation) ResetGroupID() { + m.group = nil +} + +// SetStartsAt sets the "starts_at" field. 
+func (m *UserSubscriptionMutation) SetStartsAt(t time.Time) { + m.starts_at = &t +} + +// StartsAt returns the value of the "starts_at" field in the mutation. +func (m *UserSubscriptionMutation) StartsAt() (r time.Time, exists bool) { + v := m.starts_at + if v == nil { + return + } + return *v, true +} + +// OldStartsAt returns the old "starts_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldStartsAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartsAt: %w", err) + } + return oldValue.StartsAt, nil +} + +// ResetStartsAt resets all changes to the "starts_at" field. +func (m *UserSubscriptionMutation) ResetStartsAt() { + m.starts_at = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *UserSubscriptionMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *UserSubscriptionMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserSubscriptionMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *UserSubscriptionMutation) ResetExpiresAt() { + m.expires_at = nil +} + +// SetStatus sets the "status" field. +func (m *UserSubscriptionMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UserSubscriptionMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UserSubscriptionMutation) ResetStatus() { + m.status = nil +} + +// SetDailyWindowStart sets the "daily_window_start" field. 
+func (m *UserSubscriptionMutation) SetDailyWindowStart(t time.Time) { + m.daily_window_start = &t +} + +// DailyWindowStart returns the value of the "daily_window_start" field in the mutation. +func (m *UserSubscriptionMutation) DailyWindowStart() (r time.Time, exists bool) { + v := m.daily_window_start + if v == nil { + return + } + return *v, true +} + +// OldDailyWindowStart returns the old "daily_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldDailyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyWindowStart: %w", err) + } + return oldValue.DailyWindowStart, nil +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (m *UserSubscriptionMutation) ClearDailyWindowStart() { + m.daily_window_start = nil + m.clearedFields[usersubscription.FieldDailyWindowStart] = struct{}{} +} + +// DailyWindowStartCleared returns if the "daily_window_start" field was cleared in this mutation. +func (m *UserSubscriptionMutation) DailyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldDailyWindowStart] + return ok +} + +// ResetDailyWindowStart resets all changes to the "daily_window_start" field. 
+func (m *UserSubscriptionMutation) ResetDailyWindowStart() { + m.daily_window_start = nil + delete(m.clearedFields, usersubscription.FieldDailyWindowStart) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (m *UserSubscriptionMutation) SetWeeklyWindowStart(t time.Time) { + m.weekly_window_start = &t +} + +// WeeklyWindowStart returns the value of the "weekly_window_start" field in the mutation. +func (m *UserSubscriptionMutation) WeeklyWindowStart() (r time.Time, exists bool) { + v := m.weekly_window_start + if v == nil { + return + } + return *v, true +} + +// OldWeeklyWindowStart returns the old "weekly_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldWeeklyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyWindowStart: %w", err) + } + return oldValue.WeeklyWindowStart, nil +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (m *UserSubscriptionMutation) ClearWeeklyWindowStart() { + m.weekly_window_start = nil + m.clearedFields[usersubscription.FieldWeeklyWindowStart] = struct{}{} +} + +// WeeklyWindowStartCleared returns if the "weekly_window_start" field was cleared in this mutation. 
+func (m *UserSubscriptionMutation) WeeklyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldWeeklyWindowStart] + return ok +} + +// ResetWeeklyWindowStart resets all changes to the "weekly_window_start" field. +func (m *UserSubscriptionMutation) ResetWeeklyWindowStart() { + m.weekly_window_start = nil + delete(m.clearedFields, usersubscription.FieldWeeklyWindowStart) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (m *UserSubscriptionMutation) SetMonthlyWindowStart(t time.Time) { + m.monthly_window_start = &t +} + +// MonthlyWindowStart returns the value of the "monthly_window_start" field in the mutation. +func (m *UserSubscriptionMutation) MonthlyWindowStart() (r time.Time, exists bool) { + v := m.monthly_window_start + if v == nil { + return + } + return *v, true +} + +// OldMonthlyWindowStart returns the old "monthly_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldMonthlyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyWindowStart: %w", err) + } + return oldValue.MonthlyWindowStart, nil +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. 
+func (m *UserSubscriptionMutation) ClearMonthlyWindowStart() { + m.monthly_window_start = nil + m.clearedFields[usersubscription.FieldMonthlyWindowStart] = struct{}{} +} + +// MonthlyWindowStartCleared returns if the "monthly_window_start" field was cleared in this mutation. +func (m *UserSubscriptionMutation) MonthlyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldMonthlyWindowStart] + return ok +} + +// ResetMonthlyWindowStart resets all changes to the "monthly_window_start" field. +func (m *UserSubscriptionMutation) ResetMonthlyWindowStart() { + m.monthly_window_start = nil + delete(m.clearedFields, usersubscription.FieldMonthlyWindowStart) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (m *UserSubscriptionMutation) SetDailyUsageUsd(f float64) { + m.daily_usage_usd = &f + m.adddaily_usage_usd = nil +} + +// DailyUsageUsd returns the value of the "daily_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) DailyUsageUsd() (r float64, exists bool) { + v := m.daily_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldDailyUsageUsd returns the old "daily_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldDailyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyUsageUsd: %w", err) + } + return oldValue.DailyUsageUsd, nil +} + +// AddDailyUsageUsd adds f to the "daily_usage_usd" field. 
+func (m *UserSubscriptionMutation) AddDailyUsageUsd(f float64) { + if m.adddaily_usage_usd != nil { + *m.adddaily_usage_usd += f + } else { + m.adddaily_usage_usd = &f + } +} + +// AddedDailyUsageUsd returns the value that was added to the "daily_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedDailyUsageUsd() (r float64, exists bool) { + v := m.adddaily_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetDailyUsageUsd resets all changes to the "daily_usage_usd" field. +func (m *UserSubscriptionMutation) ResetDailyUsageUsd() { + m.daily_usage_usd = nil + m.adddaily_usage_usd = nil +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (m *UserSubscriptionMutation) SetWeeklyUsageUsd(f float64) { + m.weekly_usage_usd = &f + m.addweekly_usage_usd = nil +} + +// WeeklyUsageUsd returns the value of the "weekly_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) WeeklyUsageUsd() (r float64, exists bool) { + v := m.weekly_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldWeeklyUsageUsd returns the old "weekly_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldWeeklyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyUsageUsd: %w", err) + } + return oldValue.WeeklyUsageUsd, nil +} + +// AddWeeklyUsageUsd adds f to the "weekly_usage_usd" field. 
+func (m *UserSubscriptionMutation) AddWeeklyUsageUsd(f float64) { + if m.addweekly_usage_usd != nil { + *m.addweekly_usage_usd += f + } else { + m.addweekly_usage_usd = &f + } +} + +// AddedWeeklyUsageUsd returns the value that was added to the "weekly_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedWeeklyUsageUsd() (r float64, exists bool) { + v := m.addweekly_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetWeeklyUsageUsd resets all changes to the "weekly_usage_usd" field. +func (m *UserSubscriptionMutation) ResetWeeklyUsageUsd() { + m.weekly_usage_usd = nil + m.addweekly_usage_usd = nil +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (m *UserSubscriptionMutation) SetMonthlyUsageUsd(f float64) { + m.monthly_usage_usd = &f + m.addmonthly_usage_usd = nil +} + +// MonthlyUsageUsd returns the value of the "monthly_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) MonthlyUsageUsd() (r float64, exists bool) { + v := m.monthly_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldMonthlyUsageUsd returns the old "monthly_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldMonthlyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyUsageUsd: %w", err) + } + return oldValue.MonthlyUsageUsd, nil +} + +// AddMonthlyUsageUsd adds f to the "monthly_usage_usd" field. 
+func (m *UserSubscriptionMutation) AddMonthlyUsageUsd(f float64) { + if m.addmonthly_usage_usd != nil { + *m.addmonthly_usage_usd += f + } else { + m.addmonthly_usage_usd = &f + } +} + +// AddedMonthlyUsageUsd returns the value that was added to the "monthly_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedMonthlyUsageUsd() (r float64, exists bool) { + v := m.addmonthly_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetMonthlyUsageUsd resets all changes to the "monthly_usage_usd" field. +func (m *UserSubscriptionMutation) ResetMonthlyUsageUsd() { + m.monthly_usage_usd = nil + m.addmonthly_usage_usd = nil +} + +// SetAssignedBy sets the "assigned_by" field. +func (m *UserSubscriptionMutation) SetAssignedBy(i int64) { + m.assigned_by_user = &i +} + +// AssignedBy returns the value of the "assigned_by" field in the mutation. +func (m *UserSubscriptionMutation) AssignedBy() (r int64, exists bool) { + v := m.assigned_by_user + if v == nil { + return + } + return *v, true +} + +// OldAssignedBy returns the old "assigned_by" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldAssignedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAssignedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAssignedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAssignedBy: %w", err) + } + return oldValue.AssignedBy, nil +} + +// ClearAssignedBy clears the value of the "assigned_by" field. 
+func (m *UserSubscriptionMutation) ClearAssignedBy() { + m.assigned_by_user = nil + m.clearedFields[usersubscription.FieldAssignedBy] = struct{}{} +} + +// AssignedByCleared returns if the "assigned_by" field was cleared in this mutation. +func (m *UserSubscriptionMutation) AssignedByCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldAssignedBy] + return ok +} + +// ResetAssignedBy resets all changes to the "assigned_by" field. +func (m *UserSubscriptionMutation) ResetAssignedBy() { + m.assigned_by_user = nil + delete(m.clearedFields, usersubscription.FieldAssignedBy) +} + +// SetAssignedAt sets the "assigned_at" field. +func (m *UserSubscriptionMutation) SetAssignedAt(t time.Time) { + m.assigned_at = &t +} + +// AssignedAt returns the value of the "assigned_at" field in the mutation. +func (m *UserSubscriptionMutation) AssignedAt() (r time.Time, exists bool) { + v := m.assigned_at + if v == nil { + return + } + return *v, true +} + +// OldAssignedAt returns the old "assigned_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldAssignedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAssignedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAssignedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAssignedAt: %w", err) + } + return oldValue.AssignedAt, nil +} + +// ResetAssignedAt resets all changes to the "assigned_at" field. +func (m *UserSubscriptionMutation) ResetAssignedAt() { + m.assigned_at = nil +} + +// SetNotes sets the "notes" field. 
+func (m *UserSubscriptionMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *UserSubscriptionMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *UserSubscriptionMutation) ClearNotes() { + m.notes = nil + m.clearedFields[usersubscription.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *UserSubscriptionMutation) NotesCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *UserSubscriptionMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, usersubscription.FieldNotes) +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UserSubscriptionMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[usersubscription.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. 
+func (m *UserSubscriptionMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UserSubscriptionMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UserSubscriptionMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *UserSubscriptionMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[usersubscription.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *UserSubscriptionMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *UserSubscriptionMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *UserSubscriptionMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by id. +func (m *UserSubscriptionMutation) SetAssignedByUserID(id int64) { + m.assigned_by_user = &id +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (m *UserSubscriptionMutation) ClearAssignedByUser() { + m.clearedassigned_by_user = true + m.clearedFields[usersubscription.FieldAssignedBy] = struct{}{} +} + +// AssignedByUserCleared reports if the "assigned_by_user" edge to the User entity was cleared. 
+func (m *UserSubscriptionMutation) AssignedByUserCleared() bool { + return m.AssignedByCleared() || m.clearedassigned_by_user +} + +// AssignedByUserID returns the "assigned_by_user" edge ID in the mutation. +func (m *UserSubscriptionMutation) AssignedByUserID() (id int64, exists bool) { + if m.assigned_by_user != nil { + return *m.assigned_by_user, true + } + return +} + +// AssignedByUserIDs returns the "assigned_by_user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AssignedByUserID instead. It exists only for internal usage by the builders. +func (m *UserSubscriptionMutation) AssignedByUserIDs() (ids []int64) { + if id := m.assigned_by_user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAssignedByUser resets all changes to the "assigned_by_user" edge. +func (m *UserSubscriptionMutation) ResetAssignedByUser() { + m.assigned_by_user = nil + m.clearedassigned_by_user = false +} + +// Where appends a list predicates to the UserSubscriptionMutation builder. +func (m *UserSubscriptionMutation) Where(ps ...predicate.UserSubscription) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserSubscriptionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserSubscriptionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UserSubscription, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserSubscriptionMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserSubscriptionMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UserSubscription). 
+func (m *UserSubscriptionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserSubscriptionMutation) Fields() []string { + fields := make([]string, 0, 16) + if m.created_at != nil { + fields = append(fields, usersubscription.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, usersubscription.FieldUpdatedAt) + } + if m.user != nil { + fields = append(fields, usersubscription.FieldUserID) + } + if m.group != nil { + fields = append(fields, usersubscription.FieldGroupID) + } + if m.starts_at != nil { + fields = append(fields, usersubscription.FieldStartsAt) + } + if m.expires_at != nil { + fields = append(fields, usersubscription.FieldExpiresAt) + } + if m.status != nil { + fields = append(fields, usersubscription.FieldStatus) + } + if m.daily_window_start != nil { + fields = append(fields, usersubscription.FieldDailyWindowStart) + } + if m.weekly_window_start != nil { + fields = append(fields, usersubscription.FieldWeeklyWindowStart) + } + if m.monthly_window_start != nil { + fields = append(fields, usersubscription.FieldMonthlyWindowStart) + } + if m.daily_usage_usd != nil { + fields = append(fields, usersubscription.FieldDailyUsageUsd) + } + if m.weekly_usage_usd != nil { + fields = append(fields, usersubscription.FieldWeeklyUsageUsd) + } + if m.monthly_usage_usd != nil { + fields = append(fields, usersubscription.FieldMonthlyUsageUsd) + } + if m.assigned_by_user != nil { + fields = append(fields, usersubscription.FieldAssignedBy) + } + if m.assigned_at != nil { + fields = append(fields, usersubscription.FieldAssignedAt) + } + if m.notes != nil { + fields = append(fields, usersubscription.FieldNotes) + } + return fields +} + +// Field returns the value of a field with the given name. 
The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserSubscriptionMutation) Field(name string) (ent.Value, bool) { + switch name { + case usersubscription.FieldCreatedAt: + return m.CreatedAt() + case usersubscription.FieldUpdatedAt: + return m.UpdatedAt() + case usersubscription.FieldUserID: + return m.UserID() + case usersubscription.FieldGroupID: + return m.GroupID() + case usersubscription.FieldStartsAt: + return m.StartsAt() + case usersubscription.FieldExpiresAt: + return m.ExpiresAt() + case usersubscription.FieldStatus: + return m.Status() + case usersubscription.FieldDailyWindowStart: + return m.DailyWindowStart() + case usersubscription.FieldWeeklyWindowStart: + return m.WeeklyWindowStart() + case usersubscription.FieldMonthlyWindowStart: + return m.MonthlyWindowStart() + case usersubscription.FieldDailyUsageUsd: + return m.DailyUsageUsd() + case usersubscription.FieldWeeklyUsageUsd: + return m.WeeklyUsageUsd() + case usersubscription.FieldMonthlyUsageUsd: + return m.MonthlyUsageUsd() + case usersubscription.FieldAssignedBy: + return m.AssignedBy() + case usersubscription.FieldAssignedAt: + return m.AssignedAt() + case usersubscription.FieldNotes: + return m.Notes() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *UserSubscriptionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case usersubscription.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case usersubscription.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case usersubscription.FieldUserID: + return m.OldUserID(ctx) + case usersubscription.FieldGroupID: + return m.OldGroupID(ctx) + case usersubscription.FieldStartsAt: + return m.OldStartsAt(ctx) + case usersubscription.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case usersubscription.FieldStatus: + return m.OldStatus(ctx) + case usersubscription.FieldDailyWindowStart: + return m.OldDailyWindowStart(ctx) + case usersubscription.FieldWeeklyWindowStart: + return m.OldWeeklyWindowStart(ctx) + case usersubscription.FieldMonthlyWindowStart: + return m.OldMonthlyWindowStart(ctx) + case usersubscription.FieldDailyUsageUsd: + return m.OldDailyUsageUsd(ctx) + case usersubscription.FieldWeeklyUsageUsd: + return m.OldWeeklyUsageUsd(ctx) + case usersubscription.FieldMonthlyUsageUsd: + return m.OldMonthlyUsageUsd(ctx) + case usersubscription.FieldAssignedBy: + return m.OldAssignedBy(ctx) + case usersubscription.FieldAssignedAt: + return m.OldAssignedAt(ctx) + case usersubscription.FieldNotes: + return m.OldNotes(ctx) + } + return nil, fmt.Errorf("unknown UserSubscription field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
func (m *UserSubscriptionMutation) SetField(name string, value ent.Value) error {
	// NOTE(review): generated by ent; each case type-asserts the dynamic value
	// against the schema-declared Go type before delegating to the typed setter.
	switch name {
	case usersubscription.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case usersubscription.FieldUpdatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUpdatedAt(v)
		return nil
	case usersubscription.FieldUserID:
		v, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUserID(v)
		return nil
	case usersubscription.FieldGroupID:
		v, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetGroupID(v)
		return nil
	case usersubscription.FieldStartsAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetStartsAt(v)
		return nil
	case usersubscription.FieldExpiresAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetExpiresAt(v)
		return nil
	case usersubscription.FieldStatus:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetStatus(v)
		return nil
	case usersubscription.FieldDailyWindowStart:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDailyWindowStart(v)
		return nil
	case usersubscription.FieldWeeklyWindowStart:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetWeeklyWindowStart(v)
		return nil
	case usersubscription.FieldMonthlyWindowStart:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetMonthlyWindowStart(v)
		return nil
	case usersubscription.FieldDailyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDailyUsageUsd(v)
		return nil
	case usersubscription.FieldWeeklyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetWeeklyUsageUsd(v)
		return nil
	case usersubscription.FieldMonthlyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetMonthlyUsageUsd(v)
		return nil
	case usersubscription.FieldAssignedBy:
		v, ok := value.(int64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetAssignedBy(v)
		return nil
	case usersubscription.FieldAssignedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetAssignedAt(v)
		return nil
	case usersubscription.FieldNotes:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetNotes(v)
		return nil
	}
	return fmt.Errorf("unknown UserSubscription field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *UserSubscriptionMutation) AddedFields() []string {
	// Only the three USD usage counters have additive mutations.
	var fields []string
	if m.adddaily_usage_usd != nil {
		fields = append(fields, usersubscription.FieldDailyUsageUsd)
	}
	if m.addweekly_usage_usd != nil {
		fields = append(fields, usersubscription.FieldWeeklyUsageUsd)
	}
	if m.addmonthly_usage_usd != nil {
		fields = append(fields, usersubscription.FieldMonthlyUsageUsd)
	}
	return fields
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *UserSubscriptionMutation) AddedField(name string) (ent.Value, bool) {
	switch name {
	case usersubscription.FieldDailyUsageUsd:
		return m.AddedDailyUsageUsd()
	case usersubscription.FieldWeeklyUsageUsd:
		return m.AddedWeeklyUsageUsd()
	case usersubscription.FieldMonthlyUsageUsd:
		return m.AddedMonthlyUsageUsd()
	}
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *UserSubscriptionMutation) AddField(name string, value ent.Value) error {
	// NOTE(review): generated by ent; additive semantics exist only for the
	// float64 usage counters — all other fields reject Add operations below.
	switch name {
	case usersubscription.FieldDailyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddDailyUsageUsd(v)
		return nil
	case usersubscription.FieldWeeklyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddWeeklyUsageUsd(v)
		return nil
	case usersubscription.FieldMonthlyUsageUsd:
		v, ok := value.(float64)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.AddMonthlyUsageUsd(v)
		return nil
	}
	return fmt.Errorf("unknown UserSubscription numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *UserSubscriptionMutation) ClearedFields() []string {
	var fields []string
	if m.FieldCleared(usersubscription.FieldDailyWindowStart) {
		fields = append(fields, usersubscription.FieldDailyWindowStart)
	}
	if m.FieldCleared(usersubscription.FieldWeeklyWindowStart) {
		fields = append(fields, usersubscription.FieldWeeklyWindowStart)
	}
	if m.FieldCleared(usersubscription.FieldMonthlyWindowStart) {
		fields = append(fields, usersubscription.FieldMonthlyWindowStart)
	}
	if m.FieldCleared(usersubscription.FieldAssignedBy) {
		fields = append(fields, usersubscription.FieldAssignedBy)
	}
	if m.FieldCleared(usersubscription.FieldNotes) {
		fields = append(fields, usersubscription.FieldNotes)
	}
	return fields
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *UserSubscriptionMutation) FieldCleared(name string) bool {
	// clearedFields is keyed by field name; presence alone marks "cleared".
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *UserSubscriptionMutation) ClearField(name string) error {
	// Only the optional/nullable fields of UserSubscription can be cleared.
	switch name {
	case usersubscription.FieldDailyWindowStart:
		m.ClearDailyWindowStart()
		return nil
	case usersubscription.FieldWeeklyWindowStart:
		m.ClearWeeklyWindowStart()
		return nil
	case usersubscription.FieldMonthlyWindowStart:
		m.ClearMonthlyWindowStart()
		return nil
	case usersubscription.FieldAssignedBy:
		m.ClearAssignedBy()
		return nil
	case usersubscription.FieldNotes:
		m.ClearNotes()
		return nil
	}
	return fmt.Errorf("unknown UserSubscription nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
func (m *UserSubscriptionMutation) ResetField(name string) error {
	// NOTE(review): generated by ent; one Reset delegate per schema field.
	switch name {
	case usersubscription.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case usersubscription.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
	case usersubscription.FieldUserID:
		m.ResetUserID()
		return nil
	case usersubscription.FieldGroupID:
		m.ResetGroupID()
		return nil
	case usersubscription.FieldStartsAt:
		m.ResetStartsAt()
		return nil
	case usersubscription.FieldExpiresAt:
		m.ResetExpiresAt()
		return nil
	case usersubscription.FieldStatus:
		m.ResetStatus()
		return nil
	case usersubscription.FieldDailyWindowStart:
		m.ResetDailyWindowStart()
		return nil
	case usersubscription.FieldWeeklyWindowStart:
		m.ResetWeeklyWindowStart()
		return nil
	case usersubscription.FieldMonthlyWindowStart:
		m.ResetMonthlyWindowStart()
		return nil
	case usersubscription.FieldDailyUsageUsd:
		m.ResetDailyUsageUsd()
		return nil
	case usersubscription.FieldWeeklyUsageUsd:
		m.ResetWeeklyUsageUsd()
		return nil
	case usersubscription.FieldMonthlyUsageUsd:
		m.ResetMonthlyUsageUsd()
		return nil
	case usersubscription.FieldAssignedBy:
		m.ResetAssignedBy()
		return nil
	case usersubscription.FieldAssignedAt:
		m.ResetAssignedAt()
		return nil
	case usersubscription.FieldNotes:
		m.ResetNotes()
		return nil
	}
	return fmt.Errorf("unknown UserSubscription field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
func (m *UserSubscriptionMutation) AddedEdges() []string {
	edges := make([]string, 0, 3)
	if m.user != nil {
		edges = append(edges, usersubscription.EdgeUser)
	}
	if m.group != nil {
		edges = append(edges, usersubscription.EdgeGroup)
	}
	if m.assigned_by_user != nil {
		edges = append(edges, usersubscription.EdgeAssignedByUser)
	}
	return edges
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *UserSubscriptionMutation) AddedIDs(name string) []ent.Value {
	// All three edges are unique (single *id), so at most one ID is returned.
	switch name {
	case usersubscription.EdgeUser:
		if id := m.user; id != nil {
			return []ent.Value{*id}
		}
	case usersubscription.EdgeGroup:
		if id := m.group; id != nil {
			return []ent.Value{*id}
		}
	case usersubscription.EdgeAssignedByUser:
		if id := m.assigned_by_user; id != nil {
			return []ent.Value{*id}
		}
	}
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
func (m *UserSubscriptionMutation) RemovedEdges() []string {
	// No many-to-many edges on this type, so nothing can be "removed".
	edges := make([]string, 0, 3)
	return edges
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
func (m *UserSubscriptionMutation) RemovedIDs(name string) []ent.Value {
	return nil
}

// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *UserSubscriptionMutation) ClearedEdges() []string {
	edges := make([]string, 0, 3)
	if m.cleareduser {
		edges = append(edges, usersubscription.EdgeUser)
	}
	if m.clearedgroup {
		edges = append(edges, usersubscription.EdgeGroup)
	}
	if m.clearedassigned_by_user {
		edges = append(edges, usersubscription.EdgeAssignedByUser)
	}
	return edges
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *UserSubscriptionMutation) EdgeCleared(name string) bool {
	switch name {
	case usersubscription.EdgeUser:
		return m.cleareduser
	case usersubscription.EdgeGroup:
		return m.clearedgroup
	case usersubscription.EdgeAssignedByUser:
		return m.clearedassigned_by_user
	}
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *UserSubscriptionMutation) ClearEdge(name string) error {
	switch name {
	case usersubscription.EdgeUser:
		m.ClearUser()
		return nil
	case usersubscription.EdgeGroup:
		m.ClearGroup()
		return nil
	case usersubscription.EdgeAssignedByUser:
		m.ClearAssignedByUser()
		return nil
	}
	return fmt.Errorf("unknown UserSubscription unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *UserSubscriptionMutation) ResetEdge(name string) error {
	switch name {
	case usersubscription.EdgeUser:
		m.ResetUser()
		return nil
	case usersubscription.EdgeGroup:
		m.ResetGroup()
		return nil
	case usersubscription.EdgeAssignedByUser:
		m.ResetAssignedByUser()
		return nil
	}
	return fmt.Errorf("unknown UserSubscription edge %s", name)
}
diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go
new file mode 100644
index 00000000..467dad7b
--- /dev/null
+++ b/backend/ent/predicate/predicate.go
@@ -0,0 +1,37 @@
// Code generated by ent, DO NOT EDIT.

// Package predicate declares one filter-function alias per schema type;
// the typed where-packages build values of these aliases.
package predicate

import (
	"entgo.io/ent/dialect/sql"
)

// Account is the predicate function for account builders.
type Account func(*sql.Selector)

// AccountGroup is the predicate function for accountgroup builders.
type AccountGroup func(*sql.Selector)

// ApiKey is the predicate function for apikey builders.
type ApiKey func(*sql.Selector)

// Group is the predicate function for group builders.
type Group func(*sql.Selector)

// Proxy is the predicate function for proxy builders.
type Proxy func(*sql.Selector)

// RedeemCode is the predicate function for redeemcode builders.
type RedeemCode func(*sql.Selector)

// Setting is the predicate function for setting builders.
type Setting func(*sql.Selector)

// User is the predicate function for user builders.
type User func(*sql.Selector)

// UserAllowedGroup is the predicate function for userallowedgroup builders.
type UserAllowedGroup func(*sql.Selector)

// UserSubscription is the predicate function for usersubscription builders.
type UserSubscription func(*sql.Selector)
diff --git a/backend/ent/proxy.go b/backend/ent/proxy.go
new file mode 100644
index 00000000..eb271c7a
--- /dev/null
+++ b/backend/ent/proxy.go
@@ -0,0 +1,214 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"fmt"
	"strings"
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"github.com/Wei-Shaw/sub2api/ent/proxy"
)

// Proxy is the model entity for the Proxy schema.
type Proxy struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	// Pointer type: nil means "not soft-deleted" (nullable column).
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Protocol holds the value of the "protocol" field.
	Protocol string `json:"protocol,omitempty"`
	// Host holds the value of the "host" field.
	Host string `json:"host,omitempty"`
	// Port holds the value of the "port" field.
	Port int `json:"port,omitempty"`
	// Username holds the value of the "username" field.
	Username *string `json:"username,omitempty"`
	// Password holds the value of the "password" field.
	Password *string `json:"password,omitempty"`
	// Status holds the value of the "status" field.
	Status string `json:"status,omitempty"`
	// selectValues stores extra columns selected via modifiers/order terms.
	selectValues sql.SelectValues
}

// scanValues returns the types for scanning values from sql.Rows.
func (*Proxy) scanValues(columns []string) ([]any, error) {
	// One sql.Null* scan target per requested column; unknown columns fall
	// back to sql.UnknownType and are routed into selectValues on assign.
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case proxy.FieldID, proxy.FieldPort:
			values[i] = new(sql.NullInt64)
		case proxy.FieldName, proxy.FieldProtocol, proxy.FieldHost, proxy.FieldUsername, proxy.FieldPassword, proxy.FieldStatus:
			values[i] = new(sql.NullString)
		case proxy.FieldCreatedAt, proxy.FieldUpdatedAt, proxy.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Proxy fields.
func (_m *Proxy) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case proxy.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case proxy.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case proxy.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case proxy.FieldDeletedAt:
			// Nullable column: allocate the pointer only when a value is present.
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				_m.DeletedAt = new(time.Time)
				*_m.DeletedAt = value.Time
			}
		case proxy.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				_m.Name = value.String
			}
		case proxy.FieldProtocol:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field protocol", values[i])
			} else if value.Valid {
				_m.Protocol = value.String
			}
		case proxy.FieldHost:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field host", values[i])
			} else if value.Valid {
				_m.Host = value.String
			}
		case proxy.FieldPort:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field port", values[i])
			} else if value.Valid {
				_m.Port = int(value.Int64)
			}
		case proxy.FieldUsername:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field username", values[i])
			} else if value.Valid {
				_m.Username = new(string)
				*_m.Username = value.String
			}
		case proxy.FieldPassword:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field password", values[i])
			} else if value.Valid {
				_m.Password = new(string)
				*_m.Password = value.String
			}
		case proxy.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				_m.Status = value.String
			}
		default:
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the Proxy.
// This includes values selected through modifiers, order, etc.
func (_m *Proxy) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}

// Update returns a builder for updating this Proxy.
// Note that you need to call Proxy.Unwrap() before calling this method if this Proxy
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *Proxy) Update() *ProxyUpdateOne {
	return NewProxyClient(_m.config).UpdateOne(_m)
}

// Unwrap unwraps the Proxy entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *Proxy) Unwrap() *Proxy {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: Proxy is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}

// String implements the fmt.Stringer.
// NOTE(review): this includes the Password field in the output — verify that
// Proxy values are never logged where credential exposure matters.
func (_m *Proxy) String() string {
	var builder strings.Builder
	builder.WriteString("Proxy(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := _m.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(_m.Name)
	builder.WriteString(", ")
	builder.WriteString("protocol=")
	builder.WriteString(_m.Protocol)
	builder.WriteString(", ")
	builder.WriteString("host=")
	builder.WriteString(_m.Host)
	builder.WriteString(", ")
	builder.WriteString("port=")
	builder.WriteString(fmt.Sprintf("%v", _m.Port))
	builder.WriteString(", ")
	if v := _m.Username; v != nil {
		builder.WriteString("username=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.Password; v != nil {
		builder.WriteString("password=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("status=")
	builder.WriteString(_m.Status)
	builder.WriteByte(')')
	return builder.String()
}

// Proxies is a parsable slice of Proxy.
type Proxies []*Proxy
diff --git a/backend/ent/proxy/proxy.go b/backend/ent/proxy/proxy.go
new file mode 100644
index 00000000..e5e1067c
--- /dev/null
+++ b/backend/ent/proxy/proxy.go
@@ -0,0 +1,152 @@
// Code generated by ent, DO NOT EDIT.

// Package proxy holds the schema metadata (labels, column names, ordering
// helpers) for the Proxy entity.
package proxy

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
)

const (
	// Label holds the string label denoting the proxy type in the database.
	Label = "proxy"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldProtocol holds the string denoting the protocol field in the database.
	FieldProtocol = "protocol"
	// FieldHost holds the string denoting the host field in the database.
	FieldHost = "host"
	// FieldPort holds the string denoting the port field in the database.
	FieldPort = "port"
	// FieldUsername holds the string denoting the username field in the database.
	FieldUsername = "username"
	// FieldPassword holds the string denoting the password field in the database.
	FieldPassword = "password"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// Table holds the table name of the proxy in the database.
	Table = "proxies"
)

// Columns holds all SQL columns for proxy fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldProtocol,
	FieldHost,
	FieldPort,
	FieldUsername,
	FieldPassword,
	FieldStatus,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan is fine: Columns is a small fixed list.
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
var (
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save.
	ProtocolValidator func(string) error
	// HostValidator is a validator for the "host" field. It is called by the builders before save.
	HostValidator func(string) error
	// UsernameValidator is a validator for the "username" field. It is called by the builders before save.
	UsernameValidator func(string) error
	// PasswordValidator is a validator for the "password" field. It is called by the builders before save.
	PasswordValidator func(string) error
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
)

// OrderOption defines the ordering options for the Proxy queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByProtocol orders the results by the protocol field.
func ByProtocol(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProtocol, opts...).ToFunc()
}

// ByHost orders the results by the host field.
func ByHost(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldHost, opts...).ToFunc()
}

// ByPort orders the results by the port field.
func ByPort(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPort, opts...).ToFunc()
}

// ByUsername orders the results by the username field.
func ByUsername(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUsername, opts...).ToFunc()
}

// ByPassword orders the results by the password field.
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPassword, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}
diff --git a/backend/ent/proxy/where.go b/backend/ent/proxy/where.go
new file mode 100644
index 00000000..ad92cee6
--- /dev/null
+++ b/backend/ent/proxy/where.go
@@ -0,0 +1,700 @@
// Code generated by ent, DO NOT EDIT.

package proxy

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"github.com/Wei-Shaw/sub2api/ent/predicate"
)

// ID filters vertices based on their ID field.
func ID(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.Proxy {
	return predicate.Proxy(sql.FieldLTE(FieldID, id))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
// Shorthand equality predicates: one per field, aliasing the *EQ variants.
func CreatedAt(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v))
}

// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
func DeletedAt(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldName, v))
}

// Protocol applies equality check predicate on the "protocol" field. It's identical to ProtocolEQ.
func Protocol(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldProtocol, v))
}

// Host applies equality check predicate on the "host" field. It's identical to HostEQ.
func Host(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldHost, v))
}

// Port applies equality check predicate on the "port" field. It's identical to PortEQ.
func Port(v int) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldPort, v))
}

// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ.
func Username(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldUsername, v))
}

// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
func Password(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldPassword, v))
}

// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
func Status(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldStatus, v))
}

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLTE(FieldUpdatedAt, v))
}

// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
func DeletedAtEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v))
}

// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
func DeletedAtNEQ(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNEQ(FieldDeletedAt, v))
}

// DeletedAtIn applies the In predicate on the "deleted_at" field.
func DeletedAtIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldIn(FieldDeletedAt, vs...))
}

// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
func DeletedAtNotIn(vs ...time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldNotIn(FieldDeletedAt, vs...))
}

// DeletedAtGT applies the GT predicate on the "deleted_at" field.
func DeletedAtGT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGT(FieldDeletedAt, v))
}

// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
func DeletedAtGTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldGTE(FieldDeletedAt, v))
}

// DeletedAtLT applies the LT predicate on the "deleted_at" field.
func DeletedAtLT(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLT(FieldDeletedAt, v))
}

// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
func DeletedAtLTE(v time.Time) predicate.Proxy {
	return predicate.Proxy(sql.FieldLTE(FieldDeletedAt, v))
}

// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
// (deleted_at is the soft-delete marker: NULL means "not deleted".)
func DeletedAtIsNil() predicate.Proxy {
	return predicate.Proxy(sql.FieldIsNull(FieldDeletedAt))
}

// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
func DeletedAtNotNil() predicate.Proxy {
	return predicate.Proxy(sql.FieldNotNull(FieldDeletedAt))
}

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Proxy {
	return predicate.Proxy(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Proxy {
	return predicate.Proxy(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Proxy {
	return predicate.Proxy(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldName, v)) +} + +// ProtocolEQ applies the EQ predicate on the "protocol" field. +func ProtocolEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldProtocol, v)) +} + +// ProtocolNEQ applies the NEQ predicate on the "protocol" field. +func ProtocolNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldProtocol, v)) +} + +// ProtocolIn applies the In predicate on the "protocol" field. +func ProtocolIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldProtocol, vs...)) +} + +// ProtocolNotIn applies the NotIn predicate on the "protocol" field. 
+func ProtocolNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldProtocol, vs...)) +} + +// ProtocolGT applies the GT predicate on the "protocol" field. +func ProtocolGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldProtocol, v)) +} + +// ProtocolGTE applies the GTE predicate on the "protocol" field. +func ProtocolGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldProtocol, v)) +} + +// ProtocolLT applies the LT predicate on the "protocol" field. +func ProtocolLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldProtocol, v)) +} + +// ProtocolLTE applies the LTE predicate on the "protocol" field. +func ProtocolLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldProtocol, v)) +} + +// ProtocolContains applies the Contains predicate on the "protocol" field. +func ProtocolContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldProtocol, v)) +} + +// ProtocolHasPrefix applies the HasPrefix predicate on the "protocol" field. +func ProtocolHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldProtocol, v)) +} + +// ProtocolHasSuffix applies the HasSuffix predicate on the "protocol" field. +func ProtocolHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldProtocol, v)) +} + +// ProtocolEqualFold applies the EqualFold predicate on the "protocol" field. +func ProtocolEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldProtocol, v)) +} + +// ProtocolContainsFold applies the ContainsFold predicate on the "protocol" field. +func ProtocolContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldProtocol, v)) +} + +// HostEQ applies the EQ predicate on the "host" field. 
+func HostEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldHost, v)) +} + +// HostNEQ applies the NEQ predicate on the "host" field. +func HostNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldHost, v)) +} + +// HostIn applies the In predicate on the "host" field. +func HostIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldHost, vs...)) +} + +// HostNotIn applies the NotIn predicate on the "host" field. +func HostNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldHost, vs...)) +} + +// HostGT applies the GT predicate on the "host" field. +func HostGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldHost, v)) +} + +// HostGTE applies the GTE predicate on the "host" field. +func HostGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldHost, v)) +} + +// HostLT applies the LT predicate on the "host" field. +func HostLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldHost, v)) +} + +// HostLTE applies the LTE predicate on the "host" field. +func HostLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldHost, v)) +} + +// HostContains applies the Contains predicate on the "host" field. +func HostContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldHost, v)) +} + +// HostHasPrefix applies the HasPrefix predicate on the "host" field. +func HostHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldHost, v)) +} + +// HostHasSuffix applies the HasSuffix predicate on the "host" field. +func HostHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldHost, v)) +} + +// HostEqualFold applies the EqualFold predicate on the "host" field. 
+func HostEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldHost, v)) +} + +// HostContainsFold applies the ContainsFold predicate on the "host" field. +func HostContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldHost, v)) +} + +// PortEQ applies the EQ predicate on the "port" field. +func PortEQ(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPort, v)) +} + +// PortNEQ applies the NEQ predicate on the "port" field. +func PortNEQ(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldPort, v)) +} + +// PortIn applies the In predicate on the "port" field. +func PortIn(vs ...int) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldPort, vs...)) +} + +// PortNotIn applies the NotIn predicate on the "port" field. +func PortNotIn(vs ...int) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldPort, vs...)) +} + +// PortGT applies the GT predicate on the "port" field. +func PortGT(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldPort, v)) +} + +// PortGTE applies the GTE predicate on the "port" field. +func PortGTE(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldPort, v)) +} + +// PortLT applies the LT predicate on the "port" field. +func PortLT(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldPort, v)) +} + +// PortLTE applies the LTE predicate on the "port" field. +func PortLTE(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldPort, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. +func UsernameEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. 
+func UsernameIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameIsNil applies the IsNil predicate on the "username" field. +func UsernameIsNil() predicate.Proxy { + return predicate.Proxy(sql.FieldIsNull(FieldUsername)) +} + +// UsernameNotNil applies the NotNil predicate on the "username" field. 
+func UsernameNotNil() predicate.Proxy { + return predicate.Proxy(sql.FieldNotNull(FieldUsername)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. +func UsernameEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldUsername, v)) +} + +// PasswordEQ applies the EQ predicate on the "password" field. +func PasswordEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPassword, v)) +} + +// PasswordNEQ applies the NEQ predicate on the "password" field. +func PasswordNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldPassword, v)) +} + +// PasswordIn applies the In predicate on the "password" field. +func PasswordIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldPassword, vs...)) +} + +// PasswordNotIn applies the NotIn predicate on the "password" field. +func PasswordNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldPassword, vs...)) +} + +// PasswordGT applies the GT predicate on the "password" field. +func PasswordGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldPassword, v)) +} + +// PasswordGTE applies the GTE predicate on the "password" field. +func PasswordGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldPassword, v)) +} + +// PasswordLT applies the LT predicate on the "password" field. +func PasswordLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldPassword, v)) +} + +// PasswordLTE applies the LTE predicate on the "password" field. +func PasswordLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldPassword, v)) +} + +// PasswordContains applies the Contains predicate on the "password" field. 
+func PasswordContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldPassword, v)) +} + +// PasswordHasPrefix applies the HasPrefix predicate on the "password" field. +func PasswordHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldPassword, v)) +} + +// PasswordHasSuffix applies the HasSuffix predicate on the "password" field. +func PasswordHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldPassword, v)) +} + +// PasswordIsNil applies the IsNil predicate on the "password" field. +func PasswordIsNil() predicate.Proxy { + return predicate.Proxy(sql.FieldIsNull(FieldPassword)) +} + +// PasswordNotNil applies the NotNil predicate on the "password" field. +func PasswordNotNil() predicate.Proxy { + return predicate.Proxy(sql.FieldNotNull(FieldPassword)) +} + +// PasswordEqualFold applies the EqualFold predicate on the "password" field. +func PasswordEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldPassword, v)) +} + +// PasswordContainsFold applies the ContainsFold predicate on the "password" field. +func PasswordContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldPassword, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. 
+func StatusNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldStatus, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. 
+func Or(predicates ...predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.NotPredicates(p)) +} diff --git a/backend/ent/proxy_create.go b/backend/ent/proxy_create.go new file mode 100644 index 00000000..386abaec --- /dev/null +++ b/backend/ent/proxy_create.go @@ -0,0 +1,1080 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyCreate is the builder for creating a Proxy entity. +type ProxyCreate struct { + config + mutation *ProxyMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *ProxyCreate) SetCreatedAt(v time.Time) *ProxyCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableCreatedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *ProxyCreate) SetUpdatedAt(v time.Time) *ProxyCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableUpdatedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *ProxyCreate) SetDeletedAt(v time.Time) *ProxyCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. 
+func (_c *ProxyCreate) SetNillableDeletedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *ProxyCreate) SetName(v string) *ProxyCreate { + _c.mutation.SetName(v) + return _c +} + +// SetProtocol sets the "protocol" field. +func (_c *ProxyCreate) SetProtocol(v string) *ProxyCreate { + _c.mutation.SetProtocol(v) + return _c +} + +// SetHost sets the "host" field. +func (_c *ProxyCreate) SetHost(v string) *ProxyCreate { + _c.mutation.SetHost(v) + return _c +} + +// SetPort sets the "port" field. +func (_c *ProxyCreate) SetPort(v int) *ProxyCreate { + _c.mutation.SetPort(v) + return _c +} + +// SetUsername sets the "username" field. +func (_c *ProxyCreate) SetUsername(v string) *ProxyCreate { + _c.mutation.SetUsername(v) + return _c +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableUsername(v *string) *ProxyCreate { + if v != nil { + _c.SetUsername(*v) + } + return _c +} + +// SetPassword sets the "password" field. +func (_c *ProxyCreate) SetPassword(v string) *ProxyCreate { + _c.mutation.SetPassword(v) + return _c +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_c *ProxyCreate) SetNillablePassword(v *string) *ProxyCreate { + if v != nil { + _c.SetPassword(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *ProxyCreate) SetStatus(v string) *ProxyCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableStatus(v *string) *ProxyCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// Mutation returns the ProxyMutation object of the builder. +func (_c *ProxyCreate) Mutation() *ProxyMutation { + return _c.mutation +} + +// Save creates the Proxy in the database. 
+func (_c *ProxyCreate) Save(ctx context.Context) (*Proxy, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *ProxyCreate) SaveX(ctx context.Context) *Proxy { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ProxyCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ProxyCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *ProxyCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if proxy.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := proxy.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if proxy.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := proxy.DefaultStatus + _c.mutation.SetStatus(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *ProxyCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Proxy.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Proxy.updated_at"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Proxy.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if _, ok := _c.mutation.Protocol(); !ok { + return &ValidationError{Name: "protocol", err: errors.New(`ent: missing required field "Proxy.protocol"`)} + } + if v, ok := _c.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if _, ok := _c.mutation.Host(); !ok { + return &ValidationError{Name: "host", err: errors.New(`ent: missing required field "Proxy.host"`)} + } + if v, ok := _c.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if _, ok := _c.mutation.Port(); !ok { + return &ValidationError{Name: "port", err: errors.New(`ent: missing required field "Proxy.port"`)} + } + if v, ok := _c.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _c.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator 
failed for field "Proxy.password": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Proxy.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_c *ProxyCreate) sqlSave(ctx context.Context) (*Proxy, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *ProxyCreate) createSpec() (*Proxy, *sqlgraph.CreateSpec) { + var ( + _node = &Proxy{config: _c.config} + _spec = sqlgraph.NewCreateSpec(proxy.Table, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(proxy.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + _node.Protocol = value + } + if value, ok := _c.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + _node.Host = value + } + if 
value, ok := _c.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + _node.Port = value + } + if value, ok := _c.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + _node.Username = &value + } + if value, ok := _c.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + _node.Password = &value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + _node.Status = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Proxy.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ProxyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ProxyCreate) OnConflict(opts ...sql.ConflictOption) *ProxyUpsertOne { + _c.conflict = opts + return &ProxyUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ProxyCreate) OnConflictColumns(columns ...string) *ProxyUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ProxyUpsertOne{ + create: _c, + } +} + +type ( + // ProxyUpsertOne is the builder for "upsert"-ing + // one Proxy node. + ProxyUpsertOne struct { + create *ProxyCreate + } + + // ProxyUpsert is the "OnConflict" setter. + ProxyUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. 
+func (u *ProxyUpsert) SetUpdatedAt(v time.Time) *ProxyUpsert { + u.Set(proxy.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateUpdatedAt() *ProxyUpsert { + u.SetExcluded(proxy.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsert) SetDeletedAt(v time.Time) *ProxyUpsert { + u.Set(proxy.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateDeletedAt() *ProxyUpsert { + u.SetExcluded(proxy.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ProxyUpsert) ClearDeletedAt() *ProxyUpsert { + u.SetNull(proxy.FieldDeletedAt) + return u +} + +// SetName sets the "name" field. +func (u *ProxyUpsert) SetName(v string) *ProxyUpsert { + u.Set(proxy.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateName() *ProxyUpsert { + u.SetExcluded(proxy.FieldName) + return u +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsert) SetProtocol(v string) *ProxyUpsert { + u.Set(proxy.FieldProtocol, v) + return u +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateProtocol() *ProxyUpsert { + u.SetExcluded(proxy.FieldProtocol) + return u +} + +// SetHost sets the "host" field. +func (u *ProxyUpsert) SetHost(v string) *ProxyUpsert { + u.Set(proxy.FieldHost, v) + return u +} + +// UpdateHost sets the "host" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateHost() *ProxyUpsert { + u.SetExcluded(proxy.FieldHost) + return u +} + +// SetPort sets the "port" field. 
+func (u *ProxyUpsert) SetPort(v int) *ProxyUpsert { + u.Set(proxy.FieldPort, v) + return u +} + +// UpdatePort sets the "port" field to the value that was provided on create. +func (u *ProxyUpsert) UpdatePort() *ProxyUpsert { + u.SetExcluded(proxy.FieldPort) + return u +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsert) AddPort(v int) *ProxyUpsert { + u.Add(proxy.FieldPort, v) + return u +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsert) SetUsername(v string) *ProxyUpsert { + u.Set(proxy.FieldUsername, v) + return u +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateUsername() *ProxyUpsert { + u.SetExcluded(proxy.FieldUsername) + return u +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsert) ClearUsername() *ProxyUpsert { + u.SetNull(proxy.FieldUsername) + return u +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsert) SetPassword(v string) *ProxyUpsert { + u.Set(proxy.FieldPassword, v) + return u +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsert) UpdatePassword() *ProxyUpsert { + u.SetExcluded(proxy.FieldPassword) + return u +} + +// ClearPassword clears the value of the "password" field. +func (u *ProxyUpsert) ClearPassword() *ProxyUpsert { + u.SetNull(proxy.FieldPassword) + return u +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsert) SetStatus(v string) *ProxyUpsert { + u.Set(proxy.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateStatus() *ProxyUpsert { + u.SetExcluded(proxy.FieldStatus) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *ProxyUpsertOne) UpdateNewValues() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(proxy.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ProxyUpsertOne) Ignore() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ProxyUpsertOne) DoNothing() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ProxyCreate.OnConflict +// documentation for more info. +func (u *ProxyUpsertOne) Update(set func(*ProxyUpsert)) *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ProxyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ProxyUpsertOne) SetUpdatedAt(v time.Time) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateUpdatedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsertOne) SetDeletedAt(v time.Time) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. 
+func (u *ProxyUpsertOne) UpdateDeletedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ProxyUpsertOne) ClearDeletedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *ProxyUpsertOne) SetName(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateName() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateName() + }) +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsertOne) SetProtocol(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetProtocol(v) + }) +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateProtocol() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateProtocol() + }) +} + +// SetHost sets the "host" field. +func (u *ProxyUpsertOne) SetHost(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetHost(v) + }) +} + +// UpdateHost sets the "host" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateHost() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateHost() + }) +} + +// SetPort sets the "port" field. +func (u *ProxyUpsertOne) SetPort(v int) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetPort(v) + }) +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsertOne) AddPort(v int) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.AddPort(v) + }) +} + +// UpdatePort sets the "port" field to the value that was provided on create. 
+func (u *ProxyUpsertOne) UpdatePort() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePort() + }) +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsertOne) SetUsername(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateUsername() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUsername() + }) +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsertOne) ClearUsername() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearUsername() + }) +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsertOne) SetPassword(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetPassword(v) + }) +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdatePassword() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePassword() + }) +} + +// ClearPassword clears the value of the "password" field. +func (u *ProxyUpsertOne) ClearPassword() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearPassword() + }) +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsertOne) SetStatus(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateStatus() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ProxyUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ProxyCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *ProxyUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ProxyUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ProxyUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ProxyCreateBulk is the builder for creating many Proxy entities in bulk. +type ProxyCreateBulk struct { + config + err error + builders []*ProxyCreate + conflict []sql.ConflictOption +} + +// Save creates the Proxy entities in the database. +func (_c *ProxyCreateBulk) Save(ctx context.Context) ([]*Proxy, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Proxy, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ProxyMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *ProxyCreateBulk) SaveX(ctx context.Context) []*Proxy { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ProxyCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ProxyCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Proxy.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ProxyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ProxyCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProxyUpsertBulk { + _c.conflict = opts + return &ProxyUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (_c *ProxyCreateBulk) OnConflictColumns(columns ...string) *ProxyUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ProxyUpsertBulk{ + create: _c, + } +} + +// ProxyUpsertBulk is the builder for "upsert"-ing +// a bulk of Proxy nodes. +type ProxyUpsertBulk struct { + create *ProxyCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ProxyUpsertBulk) UpdateNewValues() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(proxy.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ProxyUpsertBulk) Ignore() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ProxyUpsertBulk) DoNothing() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ProxyCreateBulk.OnConflict +// documentation for more info. +func (u *ProxyUpsertBulk) Update(set func(*ProxyUpsert)) *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ProxyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (u *ProxyUpsertBulk) SetUpdatedAt(v time.Time) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateUpdatedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsertBulk) SetDeletedAt(v time.Time) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateDeletedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ProxyUpsertBulk) ClearDeletedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *ProxyUpsertBulk) SetName(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateName() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateName() + }) +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsertBulk) SetProtocol(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetProtocol(v) + }) +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateProtocol() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateProtocol() + }) +} + +// SetHost sets the "host" field. 
+func (u *ProxyUpsertBulk) SetHost(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetHost(v) + }) +} + +// UpdateHost sets the "host" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateHost() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateHost() + }) +} + +// SetPort sets the "port" field. +func (u *ProxyUpsertBulk) SetPort(v int) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetPort(v) + }) +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsertBulk) AddPort(v int) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.AddPort(v) + }) +} + +// UpdatePort sets the "port" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdatePort() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePort() + }) +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsertBulk) SetUsername(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateUsername() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUsername() + }) +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsertBulk) ClearUsername() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearUsername() + }) +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsertBulk) SetPassword(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetPassword(v) + }) +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdatePassword() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePassword() + }) +} + +// ClearPassword clears the value of the "password" field. 
+func (u *ProxyUpsertBulk) ClearPassword() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearPassword() + }) +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsertBulk) SetStatus(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateStatus() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ProxyUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ProxyCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ProxyCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ProxyUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/proxy_delete.go b/backend/ent/proxy_delete.go new file mode 100644 index 00000000..eeeea58b --- /dev/null +++ b/backend/ent/proxy_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyDelete is the builder for deleting a Proxy entity. +type ProxyDelete struct { + config + hooks []Hook + mutation *ProxyMutation +} + +// Where appends a list predicates to the ProxyDelete builder. +func (_d *ProxyDelete) Where(ps ...predicate.Proxy) *ProxyDelete { + _d.mutation.Where(ps...) 
+ return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ProxyDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ProxyDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ProxyDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(proxy.Table, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ProxyDeleteOne is the builder for deleting a single Proxy entity. +type ProxyDeleteOne struct { + _d *ProxyDelete +} + +// Where appends a list predicates to the ProxyDelete builder. +func (_d *ProxyDeleteOne) Where(ps ...predicate.Proxy) *ProxyDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *ProxyDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{proxy.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ProxyDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/proxy_query.go b/backend/ent/proxy_query.go new file mode 100644 index 00000000..b0599553 --- /dev/null +++ b/backend/ent/proxy_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyQuery is the builder for querying Proxy entities. +type ProxyQuery struct { + config + ctx *QueryContext + order []proxy.OrderOption + inters []Interceptor + predicates []predicate.Proxy + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ProxyQuery builder. +func (_q *ProxyQuery) Where(ps ...predicate.Proxy) *ProxyQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *ProxyQuery) Limit(limit int) *ProxyQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ProxyQuery) Offset(offset int) *ProxyQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *ProxyQuery) Unique(unique bool) *ProxyQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *ProxyQuery) Order(o ...proxy.OrderOption) *ProxyQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first Proxy entity from the query. +// Returns a *NotFoundError when no Proxy was found. +func (_q *ProxyQuery) First(ctx context.Context) (*Proxy, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{proxy.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *ProxyQuery) FirstX(ctx context.Context) *Proxy { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Proxy ID from the query. +// Returns a *NotFoundError when no Proxy ID was found. +func (_q *ProxyQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{proxy.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *ProxyQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Proxy entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Proxy entity is found. +// Returns a *NotFoundError when no Proxy entities are found. +func (_q *ProxyQuery) Only(ctx context.Context) (*Proxy, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{proxy.Label} + default: + return nil, &NotSingularError{proxy.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ProxyQuery) OnlyX(ctx context.Context) *Proxy { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Proxy ID in the query. +// Returns a *NotSingularError when more than one Proxy ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *ProxyQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{proxy.Label} + default: + err = &NotSingularError{proxy.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *ProxyQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Proxies. +func (_q *ProxyQuery) All(ctx context.Context) ([]*Proxy, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Proxy, *ProxyQuery]() + return withInterceptors[[]*Proxy](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ProxyQuery) AllX(ctx context.Context) []*Proxy { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Proxy IDs. +func (_q *ProxyQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(proxy.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ProxyQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *ProxyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ProxyQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ProxyQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *ProxyQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ProxyQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ProxyQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ProxyQuery) Clone() *ProxyQuery { + if _q == nil { + return nil + } + return &ProxyQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]proxy.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Proxy{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Proxy.Query(). +// GroupBy(proxy.FieldCreatedAt). 
+// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ProxyQuery) GroupBy(field string, fields ...string) *ProxyGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProxyGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = proxy.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Proxy.Query(). +// Select(proxy.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *ProxyQuery) Select(fields ...string) *ProxySelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ProxySelect{ProxyQuery: _q} + sbuild.label = proxy.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ProxySelect configured with the given aggregations. +func (_q *ProxyQuery) Aggregate(fns ...AggregateFunc) *ProxySelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *ProxyQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !proxy.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ProxyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proxy, error) { + var ( + nodes = []*Proxy{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Proxy).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Proxy{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ProxyQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, proxy.FieldID) + for i := range fields { + if fields[i] != proxy.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(proxy.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = proxy.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ProxyGroupBy is the group-by builder for Proxy entities. +type ProxyGroupBy struct { + selector + build *ProxyQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *ProxyGroupBy) Aggregate(fns ...AggregateFunc) *ProxyGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *ProxyGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ProxyQuery, *ProxyGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ProxyGroupBy) sqlScan(ctx context.Context, root *ProxyQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ProxySelect is the builder for selecting fields of Proxy entities. +type ProxySelect struct { + *ProxyQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ProxySelect) Aggregate(fns ...AggregateFunc) *ProxySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *ProxySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ProxyQuery, *ProxySelect](ctx, _s.ProxyQuery, _s, _s.inters, v) +} + +func (_s *ProxySelect) sqlScan(ctx context.Context, root *ProxyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/proxy_update.go b/backend/ent/proxy_update.go new file mode 100644 index 00000000..3f5e1a7f --- /dev/null +++ b/backend/ent/proxy_update.go @@ -0,0 +1,646 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyUpdate is the builder for updating Proxy entities. +type ProxyUpdate struct { + config + hooks []Hook + mutation *ProxyMutation +} + +// Where appends a list predicates to the ProxyUpdate builder. +func (_u *ProxyUpdate) Where(ps ...predicate.Proxy) *ProxyUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ProxyUpdate) SetUpdatedAt(v time.Time) *ProxyUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (_u *ProxyUpdate) SetDeletedAt(v time.Time) *ProxyUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableDeletedAt(v *time.Time) *ProxyUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ProxyUpdate) ClearDeletedAt() *ProxyUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *ProxyUpdate) SetName(v string) *ProxyUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableName(v *string) *ProxyUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProtocol sets the "protocol" field. +func (_u *ProxyUpdate) SetProtocol(v string) *ProxyUpdate { + _u.mutation.SetProtocol(v) + return _u +} + +// SetNillableProtocol sets the "protocol" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableProtocol(v *string) *ProxyUpdate { + if v != nil { + _u.SetProtocol(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *ProxyUpdate) SetHost(v string) *ProxyUpdate { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableHost(v *string) *ProxyUpdate { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// SetPort sets the "port" field. +func (_u *ProxyUpdate) SetPort(v int) *ProxyUpdate { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillablePort(v *int) *ProxyUpdate { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. 
+func (_u *ProxyUpdate) AddPort(v int) *ProxyUpdate { + _u.mutation.AddPort(v) + return _u +} + +// SetUsername sets the "username" field. +func (_u *ProxyUpdate) SetUsername(v string) *ProxyUpdate { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableUsername(v *string) *ProxyUpdate { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *ProxyUpdate) ClearUsername() *ProxyUpdate { + _u.mutation.ClearUsername() + return _u +} + +// SetPassword sets the "password" field. +func (_u *ProxyUpdate) SetPassword(v string) *ProxyUpdate { + _u.mutation.SetPassword(v) + return _u +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillablePassword(v *string) *ProxyUpdate { + if v != nil { + _u.SetPassword(*v) + } + return _u +} + +// ClearPassword clears the value of the "password" field. +func (_u *ProxyUpdate) ClearPassword() *ProxyUpdate { + _u.mutation.ClearPassword() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ProxyUpdate) SetStatus(v string) *ProxyUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableStatus(v *string) *ProxyUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// Mutation returns the ProxyMutation object of the builder. +func (_u *ProxyUpdate) Mutation() *ProxyMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *ProxyUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *ProxyUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *ProxyUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ProxyUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ProxyUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if proxy.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ProxyUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if v, ok := _u.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if v, ok := _u.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _u.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed 
for field "Proxy.password": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_u *ProxyUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(proxy.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + _spec.AddField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(proxy.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + } + if _u.mutation.PasswordCleared() { + _spec.ClearField(proxy.FieldPassword, field.TypeString) + } + if value, ok := 
_u.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{proxy.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ProxyUpdateOne is the builder for updating a single Proxy entity. +type ProxyUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ProxyMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ProxyUpdateOne) SetUpdatedAt(v time.Time) *ProxyUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ProxyUpdateOne) SetDeletedAt(v time.Time) *ProxyUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableDeletedAt(v *time.Time) *ProxyUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ProxyUpdateOne) ClearDeletedAt() *ProxyUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *ProxyUpdateOne) SetName(v string) *ProxyUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableName(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProtocol sets the "protocol" field. +func (_u *ProxyUpdateOne) SetProtocol(v string) *ProxyUpdateOne { + _u.mutation.SetProtocol(v) + return _u +} + +// SetNillableProtocol sets the "protocol" field if the given value is not nil. 
+func (_u *ProxyUpdateOne) SetNillableProtocol(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetProtocol(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *ProxyUpdateOne) SetHost(v string) *ProxyUpdateOne { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableHost(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// SetPort sets the "port" field. +func (_u *ProxyUpdateOne) SetPort(v int) *ProxyUpdateOne { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillablePort(v *int) *ProxyUpdateOne { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. +func (_u *ProxyUpdateOne) AddPort(v int) *ProxyUpdateOne { + _u.mutation.AddPort(v) + return _u +} + +// SetUsername sets the "username" field. +func (_u *ProxyUpdateOne) SetUsername(v string) *ProxyUpdateOne { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableUsername(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *ProxyUpdateOne) ClearUsername() *ProxyUpdateOne { + _u.mutation.ClearUsername() + return _u +} + +// SetPassword sets the "password" field. +func (_u *ProxyUpdateOne) SetPassword(v string) *ProxyUpdateOne { + _u.mutation.SetPassword(v) + return _u +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillablePassword(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetPassword(*v) + } + return _u +} + +// ClearPassword clears the value of the "password" field. 
+func (_u *ProxyUpdateOne) ClearPassword() *ProxyUpdateOne { + _u.mutation.ClearPassword() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ProxyUpdateOne) SetStatus(v string) *ProxyUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableStatus(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// Mutation returns the ProxyMutation object of the builder. +func (_u *ProxyUpdateOne) Mutation() *ProxyMutation { + return _u.mutation +} + +// Where appends a list predicates to the ProxyUpdate builder. +func (_u *ProxyUpdateOne) Where(ps ...predicate.Proxy) *ProxyUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ProxyUpdateOne) Select(field string, fields ...string) *ProxyUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Proxy entity. +func (_u *ProxyUpdateOne) Save(ctx context.Context) (*Proxy, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ProxyUpdateOne) SaveX(ctx context.Context) *Proxy { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ProxyUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ProxyUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_u *ProxyUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if proxy.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ProxyUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if v, ok := _u.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if v, ok := _u.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _u.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_u *ProxyUpdateOne) sqlSave(ctx context.Context) (_node *Proxy, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, 
sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Proxy.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, proxy.FieldID) + for _, f := range fields { + if !proxy.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != proxy.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(proxy.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + _spec.AddField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(proxy.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + } + if 
_u.mutation.PasswordCleared() { + _spec.ClearField(proxy.FieldPassword, field.TypeString) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + } + _node = &Proxy{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{proxy.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/redeemcode.go b/backend/ent/redeemcode.go new file mode 100644 index 00000000..24cd4231 --- /dev/null +++ b/backend/ent/redeemcode.go @@ -0,0 +1,267 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCode is the model entity for the RedeemCode schema. +type RedeemCode struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // Code holds the value of the "code" field. + Code string `json:"code,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Value holds the value of the "value" field. + Value float64 `json:"value,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // UsedBy holds the value of the "used_by" field. + UsedBy *int64 `json:"used_by,omitempty"` + // UsedAt holds the value of the "used_at" field. + UsedAt *time.Time `json:"used_at,omitempty"` + // Notes holds the value of the "notes" field. + Notes *string `json:"notes,omitempty"` + // CreatedAt holds the value of the "created_at" field. 
+ CreatedAt time.Time `json:"created_at,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID *int64 `json:"group_id,omitempty"` + // ValidityDays holds the value of the "validity_days" field. + ValidityDays int `json:"validity_days,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the RedeemCodeQuery when eager-loading is set. + Edges RedeemCodeEdges `json:"edges"` + selectValues sql.SelectValues +} + +// RedeemCodeEdges holds the relations/edges for other nodes in the graph. +type RedeemCodeEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e RedeemCodeEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e RedeemCodeEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*RedeemCode) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case redeemcode.FieldValue: + values[i] = new(sql.NullFloat64) + case redeemcode.FieldID, redeemcode.FieldUsedBy, redeemcode.FieldGroupID, redeemcode.FieldValidityDays: + values[i] = new(sql.NullInt64) + case redeemcode.FieldCode, redeemcode.FieldType, redeemcode.FieldStatus, redeemcode.FieldNotes: + values[i] = new(sql.NullString) + case redeemcode.FieldUsedAt, redeemcode.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the RedeemCode fields. +func (_m *RedeemCode) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case redeemcode.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case redeemcode.FieldCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code", values[i]) + } else if value.Valid { + _m.Code = value.String + } + case redeemcode.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + _m.Type = value.String + } + case redeemcode.FieldValue: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.Float64 + } + case redeemcode.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + 
_m.Status = value.String + } + case redeemcode.FieldUsedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field used_by", values[i]) + } else if value.Valid { + _m.UsedBy = new(int64) + *_m.UsedBy = value.Int64 + } + case redeemcode.FieldUsedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field used_at", values[i]) + } else if value.Valid { + _m.UsedAt = new(time.Time) + *_m.UsedAt = value.Time + } + case redeemcode.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = new(string) + *_m.Notes = value.String + } + case redeemcode.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case redeemcode.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = new(int64) + *_m.GroupID = value.Int64 + } + case redeemcode.FieldValidityDays: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field validity_days", values[i]) + } else if value.Valid { + _m.ValidityDays = int(value.Int64) + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the RedeemCode. +// This includes values selected through modifiers, order, etc. +func (_m *RedeemCode) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the RedeemCode entity. 
+func (_m *RedeemCode) QueryUser() *UserQuery { + return NewRedeemCodeClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the RedeemCode entity. +func (_m *RedeemCode) QueryGroup() *GroupQuery { + return NewRedeemCodeClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this RedeemCode. +// Note that you need to call RedeemCode.Unwrap() before calling this method if this RedeemCode +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *RedeemCode) Update() *RedeemCodeUpdateOne { + return NewRedeemCodeClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the RedeemCode entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *RedeemCode) Unwrap() *RedeemCode { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: RedeemCode is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *RedeemCode) String() string { + var builder strings.Builder + builder.WriteString("RedeemCode(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("code=") + builder.WriteString(_m.Code) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(_m.Type) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(fmt.Sprintf("%v", _m.Value)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.UsedBy; v != nil { + builder.WriteString("used_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.UsedAt; v != nil { + builder.WriteString("used_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.Notes; v != nil { + builder.WriteString("notes=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.GroupID; v != nil { + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("validity_days=") + builder.WriteString(fmt.Sprintf("%v", _m.ValidityDays)) + builder.WriteByte(')') + return builder.String() +} + +// RedeemCodes is a parsable slice of RedeemCode. +type RedeemCodes []*RedeemCode diff --git a/backend/ent/redeemcode/redeemcode.go b/backend/ent/redeemcode/redeemcode.go new file mode 100644 index 00000000..b010476c --- /dev/null +++ b/backend/ent/redeemcode/redeemcode.go @@ -0,0 +1,187 @@ +// Code generated by ent, DO NOT EDIT. + +package redeemcode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the redeemcode type in the database. 
+ Label = "redeem_code" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCode holds the string denoting the code field in the database. + FieldCode = "code" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldUsedBy holds the string denoting the used_by field in the database. + FieldUsedBy = "used_by" + // FieldUsedAt holds the string denoting the used_at field in the database. + FieldUsedAt = "used_at" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldValidityDays holds the string denoting the validity_days field in the database. + FieldValidityDays = "validity_days" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // Table holds the table name of the redeemcode in the database. + Table = "redeem_codes" + // UserTable is the table that holds the user relation/edge. + UserTable = "redeem_codes" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "used_by" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "redeem_codes" + // GroupInverseTable is the table name for the Group entity. 
+ // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for redeemcode fields. +var Columns = []string{ + FieldID, + FieldCode, + FieldType, + FieldValue, + FieldStatus, + FieldUsedBy, + FieldUsedAt, + FieldNotes, + FieldCreatedAt, + FieldGroupID, + FieldValidityDays, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // CodeValidator is a validator for the "code" field. It is called by the builders before save. + CodeValidator func(string) error + // DefaultType holds the default value on creation for the "type" field. + DefaultType string + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // DefaultValue holds the default value on creation for the "value" field. + DefaultValue float64 + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultValidityDays holds the default value on creation for the "validity_days" field. + DefaultValidityDays int +) + +// OrderOption defines the ordering options for the RedeemCode queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCode orders the results by the code field. 
+func ByCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCode, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUsedBy orders the results by the used_by field. +func ByUsedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedBy, opts...).ToFunc() +} + +// ByUsedAt orders the results by the used_at field. +func ByUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedAt, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByValidityDays orders the results by the validity_days field. +func ByValidityDays(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValidityDays, opts...).ToFunc() +} + +// ByUserField orders the results by user field. 
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/redeemcode/where.go b/backend/ent/redeemcode/where.go new file mode 100644 index 00000000..1fdedba5 --- /dev/null +++ b/backend/ent/redeemcode/where.go @@ -0,0 +1,667 @@ +// Code generated by ent, DO NOT EDIT. + +package redeemcode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldID, id)) +} + +// Code applies equality check predicate on the "code" field. It's identical to CodeEQ. +func Code(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCode, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldType, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValue, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v)) +} + +// UsedBy applies equality check predicate on the "used_by" field. It's identical to UsedByEQ. +func UsedBy(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v)) +} + +// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ. 
+func UsedAt(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v)) +} + +// ValidityDays applies equality check predicate on the "validity_days" field. It's identical to ValidityDaysEQ. +func ValidityDays(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v)) +} + +// CodeEQ applies the EQ predicate on the "code" field. +func CodeEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCode, v)) +} + +// CodeNEQ applies the NEQ predicate on the "code" field. +func CodeNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldCode, v)) +} + +// CodeIn applies the In predicate on the "code" field. +func CodeIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldCode, vs...)) +} + +// CodeNotIn applies the NotIn predicate on the "code" field. +func CodeNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldCode, vs...)) +} + +// CodeGT applies the GT predicate on the "code" field. +func CodeGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldCode, v)) +} + +// CodeGTE applies the GTE predicate on the "code" field. 
+func CodeGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldCode, v)) +} + +// CodeLT applies the LT predicate on the "code" field. +func CodeLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldCode, v)) +} + +// CodeLTE applies the LTE predicate on the "code" field. +func CodeLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldCode, v)) +} + +// CodeContains applies the Contains predicate on the "code" field. +func CodeContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldCode, v)) +} + +// CodeHasPrefix applies the HasPrefix predicate on the "code" field. +func CodeHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldCode, v)) +} + +// CodeHasSuffix applies the HasSuffix predicate on the "code" field. +func CodeHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldCode, v)) +} + +// CodeEqualFold applies the EqualFold predicate on the "code" field. +func CodeEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldCode, v)) +} + +// CodeContainsFold applies the ContainsFold predicate on the "code" field. +func CodeContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldCode, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. 
+func TypeNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldType, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. 
+func ValueNEQ(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldValue, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. 
+func StatusGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldStatus, v)) +} + +// UsedByEQ applies the EQ predicate on the "used_by" field. +func UsedByEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v)) +} + +// UsedByNEQ applies the NEQ predicate on the "used_by" field. 
+func UsedByNEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldUsedBy, v)) +} + +// UsedByIn applies the In predicate on the "used_by" field. +func UsedByIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldUsedBy, vs...)) +} + +// UsedByNotIn applies the NotIn predicate on the "used_by" field. +func UsedByNotIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldUsedBy, vs...)) +} + +// UsedByIsNil applies the IsNil predicate on the "used_by" field. +func UsedByIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldUsedBy)) +} + +// UsedByNotNil applies the NotNil predicate on the "used_by" field. +func UsedByNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldUsedBy)) +} + +// UsedAtEQ applies the EQ predicate on the "used_at" field. +func UsedAtEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v)) +} + +// UsedAtNEQ applies the NEQ predicate on the "used_at" field. +func UsedAtNEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldUsedAt, v)) +} + +// UsedAtIn applies the In predicate on the "used_at" field. +func UsedAtIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldUsedAt, vs...)) +} + +// UsedAtNotIn applies the NotIn predicate on the "used_at" field. +func UsedAtNotIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldUsedAt, vs...)) +} + +// UsedAtGT applies the GT predicate on the "used_at" field. +func UsedAtGT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldUsedAt, v)) +} + +// UsedAtGTE applies the GTE predicate on the "used_at" field. 
+func UsedAtGTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldUsedAt, v)) +} + +// UsedAtLT applies the LT predicate on the "used_at" field. +func UsedAtLT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldUsedAt, v)) +} + +// UsedAtLTE applies the LTE predicate on the "used_at" field. +func UsedAtLTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldUsedAt, v)) +} + +// UsedAtIsNil applies the IsNil predicate on the "used_at" field. +func UsedAtIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldUsedAt)) +} + +// UsedAtNotNil applies the NotNil predicate on the "used_at" field. +func UsedAtNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldUsedAt)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. 
+func NotesLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. +func NotesNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldCreatedAt, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. 
+func GroupIDNotIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDIsNil applies the IsNil predicate on the "group_id" field. +func GroupIDIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldGroupID)) +} + +// GroupIDNotNil applies the NotNil predicate on the "group_id" field. +func GroupIDNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldGroupID)) +} + +// ValidityDaysEQ applies the EQ predicate on the "validity_days" field. +func ValidityDaysEQ(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v)) +} + +// ValidityDaysNEQ applies the NEQ predicate on the "validity_days" field. +func ValidityDaysNEQ(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldValidityDays, v)) +} + +// ValidityDaysIn applies the In predicate on the "validity_days" field. +func ValidityDaysIn(vs ...int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldValidityDays, vs...)) +} + +// ValidityDaysNotIn applies the NotIn predicate on the "validity_days" field. +func ValidityDaysNotIn(vs ...int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldValidityDays, vs...)) +} + +// ValidityDaysGT applies the GT predicate on the "validity_days" field. +func ValidityDaysGT(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldValidityDays, v)) +} + +// ValidityDaysGTE applies the GTE predicate on the "validity_days" field. +func ValidityDaysGTE(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldValidityDays, v)) +} + +// ValidityDaysLT applies the LT predicate on the "validity_days" field. +func ValidityDaysLT(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldValidityDays, v)) +} + +// ValidityDaysLTE applies the LTE predicate on the "validity_days" field. 
+func ValidityDaysLTE(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldValidityDays, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.NotPredicates(p)) +} diff --git a/backend/ent/redeemcode_create.go b/backend/ent/redeemcode_create.go new file mode 100644 index 00000000..efdcee40 --- /dev/null +++ b/backend/ent/redeemcode_create.go @@ -0,0 +1,1177 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeCreate is the builder for creating a RedeemCode entity. +type RedeemCodeCreate struct { + config + mutation *RedeemCodeMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCode sets the "code" field. +func (_c *RedeemCodeCreate) SetCode(v string) *RedeemCodeCreate { + _c.mutation.SetCode(v) + return _c +} + +// SetType sets the "type" field. +func (_c *RedeemCodeCreate) SetType(v string) *RedeemCodeCreate { + _c.mutation.SetType(v) + return _c +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableType(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetType(*v) + } + return _c +} + +// SetValue sets the "value" field. +func (_c *RedeemCodeCreate) SetValue(v float64) *RedeemCodeCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableValue(v *float64) *RedeemCodeCreate { + if v != nil { + _c.SetValue(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *RedeemCodeCreate) SetStatus(v string) *RedeemCodeCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. 
+func (_c *RedeemCodeCreate) SetNillableStatus(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetUsedBy sets the "used_by" field. +func (_c *RedeemCodeCreate) SetUsedBy(v int64) *RedeemCodeCreate { + _c.mutation.SetUsedBy(v) + return _c +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUsedBy(v *int64) *RedeemCodeCreate { + if v != nil { + _c.SetUsedBy(*v) + } + return _c +} + +// SetUsedAt sets the "used_at" field. +func (_c *RedeemCodeCreate) SetUsedAt(v time.Time) *RedeemCodeCreate { + _c.mutation.SetUsedAt(v) + return _c +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUsedAt(v *time.Time) *RedeemCodeCreate { + if v != nil { + _c.SetUsedAt(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *RedeemCodeCreate) SetNotes(v string) *RedeemCodeCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableNotes(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *RedeemCodeCreate) SetCreatedAt(v time.Time) *RedeemCodeCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableCreatedAt(v *time.Time) *RedeemCodeCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *RedeemCodeCreate) SetGroupID(v int64) *RedeemCodeCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. 
+func (_c *RedeemCodeCreate) SetNillableGroupID(v *int64) *RedeemCodeCreate { + if v != nil { + _c.SetGroupID(*v) + } + return _c +} + +// SetValidityDays sets the "validity_days" field. +func (_c *RedeemCodeCreate) SetValidityDays(v int) *RedeemCodeCreate { + _c.mutation.SetValidityDays(v) + return _c +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableValidityDays(v *int) *RedeemCodeCreate { + if v != nil { + _c.SetValidityDays(*v) + } + return _c +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (_c *RedeemCodeCreate) SetUserID(id int64) *RedeemCodeCreate { + _c.mutation.SetUserID(id) + return _c +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUserID(id *int64) *RedeemCodeCreate { + if id != nil { + _c = _c.SetUserID(*id) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *RedeemCodeCreate) SetUser(v *User) *RedeemCodeCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *RedeemCodeCreate) SetGroup(v *Group) *RedeemCodeCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_c *RedeemCodeCreate) Mutation() *RedeemCodeMutation { + return _c.mutation +} + +// Save creates the RedeemCode in the database. +func (_c *RedeemCodeCreate) Save(ctx context.Context) (*RedeemCode, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *RedeemCodeCreate) SaveX(ctx context.Context) *RedeemCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *RedeemCodeCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *RedeemCodeCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *RedeemCodeCreate) defaults() { + if _, ok := _c.mutation.GetType(); !ok { + v := redeemcode.DefaultType + _c.mutation.SetType(v) + } + if _, ok := _c.mutation.Value(); !ok { + v := redeemcode.DefaultValue + _c.mutation.SetValue(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := redeemcode.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := redeemcode.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.ValidityDays(); !ok { + v := redeemcode.DefaultValidityDays + _c.mutation.SetValidityDays(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *RedeemCodeCreate) check() error { + if _, ok := _c.mutation.Code(); !ok { + return &ValidationError{Name: "code", err: errors.New(`ent: missing required field "RedeemCode.code"`)} + } + if v, ok := _c.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if _, ok := _c.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "RedeemCode.type"`)} + } + if v, ok := _c.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "RedeemCode.value"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "RedeemCode.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "RedeemCode.created_at"`)} + } + if _, ok := _c.mutation.ValidityDays(); !ok { + return &ValidationError{Name: "validity_days", err: errors.New(`ent: missing required field "RedeemCode.validity_days"`)} + } + return nil +} + +func (_c *RedeemCodeCreate) sqlSave(ctx context.Context) (*RedeemCode, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = 
&ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *RedeemCodeCreate) createSpec() (*RedeemCode, *sqlgraph.CreateSpec) { + var ( + _node = &RedeemCode{config: _c.config} + _spec = sqlgraph.NewCreateSpec(redeemcode.Table, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + _node.Code = value + } + if value, ok := _c.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + _node.Type = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value) + _node.Value = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + _node.UsedAt = &value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(redeemcode.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + _node.ValidityDays = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + 
_node.UsedBy = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.RedeemCode.Create(). +// SetCode(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.RedeemCodeUpsert) { +// SetCode(v+v). +// }). +// Exec(ctx) +func (_c *RedeemCodeCreate) OnConflict(opts ...sql.ConflictOption) *RedeemCodeUpsertOne { + _c.conflict = opts + return &RedeemCodeUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *RedeemCodeCreate) OnConflictColumns(columns ...string) *RedeemCodeUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &RedeemCodeUpsertOne{ + create: _c, + } +} + +type ( + // RedeemCodeUpsertOne is the builder for "upsert"-ing + // one RedeemCode node. + RedeemCodeUpsertOne struct { + create *RedeemCodeCreate + } + + // RedeemCodeUpsert is the "OnConflict" setter. + RedeemCodeUpsert struct { + *sql.UpdateSet + } +) + +// SetCode sets the "code" field. 
+func (u *RedeemCodeUpsert) SetCode(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldCode, v) + return u +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateCode() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldCode) + return u +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsert) SetType(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldType, v) + return u +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateType() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldType) + return u +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsert) SetValue(v float64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldValue, v) + return u +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateValue() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldValue) + return u +} + +// AddValue adds v to the "value" field. +func (u *RedeemCodeUpsert) AddValue(v float64) *RedeemCodeUpsert { + u.Add(redeemcode.FieldValue, v) + return u +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsert) SetStatus(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateStatus() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldStatus) + return u +} + +// SetUsedBy sets the "used_by" field. +func (u *RedeemCodeUpsert) SetUsedBy(v int64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldUsedBy, v) + return u +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateUsedBy() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldUsedBy) + return u +} + +// ClearUsedBy clears the value of the "used_by" field. 
+func (u *RedeemCodeUpsert) ClearUsedBy() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldUsedBy) + return u +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsert) SetUsedAt(v time.Time) *RedeemCodeUpsert { + u.Set(redeemcode.FieldUsedAt, v) + return u +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateUsedAt() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldUsedAt) + return u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsert) ClearUsedAt() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldUsedAt) + return u +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsert) SetNotes(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateNotes() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *RedeemCodeUpsert) ClearNotes() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldNotes) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *RedeemCodeUpsert) SetGroupID(v int64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateGroupID() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldGroupID) + return u +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsert) ClearGroupID() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldGroupID) + return u +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsert) SetValidityDays(v int) *RedeemCodeUpsert { + u.Set(redeemcode.FieldValidityDays, v) + return u +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. 
+func (u *RedeemCodeUpsert) UpdateValidityDays() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldValidityDays) + return u +} + +// AddValidityDays adds v to the "validity_days" field. +func (u *RedeemCodeUpsert) AddValidityDays(v int) *RedeemCodeUpsert { + u.Add(redeemcode.FieldValidityDays, v) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *RedeemCodeUpsertOne) UpdateNewValues() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(redeemcode.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *RedeemCodeUpsertOne) Ignore() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *RedeemCodeUpsertOne) DoNothing() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the RedeemCodeCreate.OnConflict +// documentation for more info. +func (u *RedeemCodeUpsertOne) Update(set func(*RedeemCodeUpsert)) *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&RedeemCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. 
+func (u *RedeemCodeUpsertOne) SetCode(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateCode() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateCode() + }) +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsertOne) SetType(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateType() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateType() + }) +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsertOne) SetValue(v float64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValue(v) + }) +} + +// AddValue adds v to the "value" field. +func (u *RedeemCodeUpsertOne) AddValue(v float64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateValue() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValue() + }) +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsertOne) SetStatus(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateStatus() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetUsedBy sets the "used_by" field. 
+func (u *RedeemCodeUpsertOne) SetUsedBy(v int64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedBy(v) + }) +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateUsedBy() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedBy() + }) +} + +// ClearUsedBy clears the value of the "used_by" field. +func (u *RedeemCodeUpsertOne) ClearUsedBy() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedBy() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsertOne) SetUsedAt(v time.Time) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateUsedAt() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedAt() + }) +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsertOne) ClearUsedAt() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsertOne) SetNotes(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateNotes() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *RedeemCodeUpsertOne) ClearNotes() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearNotes() + }) +} + +// SetGroupID sets the "group_id" field. 
+func (u *RedeemCodeUpsertOne) SetGroupID(v int64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateGroupID() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsertOne) ClearGroupID() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearGroupID() + }) +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsertOne) SetValidityDays(v int) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValidityDays(v) + }) +} + +// AddValidityDays adds v to the "validity_days" field. +func (u *RedeemCodeUpsertOne) AddValidityDays(v int) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValidityDays(v) + }) +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateValidityDays() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValidityDays() + }) +} + +// Exec executes the query. +func (u *RedeemCodeUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for RedeemCodeCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *RedeemCodeUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *RedeemCodeUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *RedeemCodeUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// RedeemCodeCreateBulk is the builder for creating many RedeemCode entities in bulk. +type RedeemCodeCreateBulk struct { + config + err error + builders []*RedeemCodeCreate + conflict []sql.ConflictOption +} + +// Save creates the RedeemCode entities in the database. +func (_c *RedeemCodeCreateBulk) Save(ctx context.Context) ([]*RedeemCode, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*RedeemCode, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*RedeemCodeMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *RedeemCodeCreateBulk) SaveX(ctx context.Context) []*RedeemCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *RedeemCodeCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *RedeemCodeCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.RedeemCode.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.RedeemCodeUpsert) { +// SetCode(v+v). +// }). +// Exec(ctx) +func (_c *RedeemCodeCreateBulk) OnConflict(opts ...sql.ConflictOption) *RedeemCodeUpsertBulk { + _c.conflict = opts + return &RedeemCodeUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). 
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *RedeemCodeCreateBulk) OnConflictColumns(columns ...string) *RedeemCodeUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &RedeemCodeUpsertBulk{ + create: _c, + } +} + +// RedeemCodeUpsertBulk is the builder for "upsert"-ing +// a bulk of RedeemCode nodes. +type RedeemCodeUpsertBulk struct { + create *RedeemCodeCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *RedeemCodeUpsertBulk) UpdateNewValues() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(redeemcode.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *RedeemCodeUpsertBulk) Ignore() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *RedeemCodeUpsertBulk) DoNothing() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the RedeemCodeCreateBulk.OnConflict +// documentation for more info. 
+func (u *RedeemCodeUpsertBulk) Update(set func(*RedeemCodeUpsert)) *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&RedeemCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *RedeemCodeUpsertBulk) SetCode(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateCode() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateCode() + }) +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsertBulk) SetType(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateType() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateType() + }) +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsertBulk) SetValue(v float64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValue(v) + }) +} + +// AddValue adds v to the "value" field. +func (u *RedeemCodeUpsertBulk) AddValue(v float64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateValue() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValue() + }) +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsertBulk) SetStatus(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. 
+func (u *RedeemCodeUpsertBulk) UpdateStatus() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetUsedBy sets the "used_by" field. +func (u *RedeemCodeUpsertBulk) SetUsedBy(v int64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedBy(v) + }) +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateUsedBy() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedBy() + }) +} + +// ClearUsedBy clears the value of the "used_by" field. +func (u *RedeemCodeUpsertBulk) ClearUsedBy() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedBy() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsertBulk) SetUsedAt(v time.Time) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateUsedAt() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedAt() + }) +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsertBulk) ClearUsedAt() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsertBulk) SetNotes(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateNotes() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. 
+func (u *RedeemCodeUpsertBulk) ClearNotes() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearNotes() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *RedeemCodeUpsertBulk) SetGroupID(v int64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateGroupID() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsertBulk) ClearGroupID() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearGroupID() + }) +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsertBulk) SetValidityDays(v int) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValidityDays(v) + }) +} + +// AddValidityDays adds v to the "validity_days" field. +func (u *RedeemCodeUpsertBulk) AddValidityDays(v int) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValidityDays(v) + }) +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateValidityDays() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValidityDays() + }) +} + +// Exec executes the query. +func (u *RedeemCodeUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the RedeemCodeCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for RedeemCodeCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *RedeemCodeUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/redeemcode_delete.go b/backend/ent/redeemcode_delete.go new file mode 100644 index 00000000..f16ef1e9 --- /dev/null +++ b/backend/ent/redeemcode_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" +) + +// RedeemCodeDelete is the builder for deleting a RedeemCode entity. +type RedeemCodeDelete struct { + config + hooks []Hook + mutation *RedeemCodeMutation +} + +// Where appends a list predicates to the RedeemCodeDelete builder. +func (_d *RedeemCodeDelete) Where(ps ...predicate.RedeemCode) *RedeemCodeDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *RedeemCodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *RedeemCodeDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *RedeemCodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(redeemcode.Table, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// RedeemCodeDeleteOne is the builder for deleting a single RedeemCode entity. +type RedeemCodeDeleteOne struct { + _d *RedeemCodeDelete +} + +// Where appends a list predicates to the RedeemCodeDelete builder. +func (_d *RedeemCodeDeleteOne) Where(ps ...predicate.RedeemCode) *RedeemCodeDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *RedeemCodeDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{redeemcode.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *RedeemCodeDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/redeemcode_query.go b/backend/ent/redeemcode_query.go new file mode 100644 index 00000000..442bfe81 --- /dev/null +++ b/backend/ent/redeemcode_query.go @@ -0,0 +1,687 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeQuery is the builder for querying RedeemCode entities. +type RedeemCodeQuery struct { + config + ctx *QueryContext + order []redeemcode.OrderOption + inters []Interceptor + predicates []predicate.RedeemCode + withUser *UserQuery + withGroup *GroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the RedeemCodeQuery builder. +func (_q *RedeemCodeQuery) Where(ps ...predicate.RedeemCode) *RedeemCodeQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *RedeemCodeQuery) Limit(limit int) *RedeemCodeQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *RedeemCodeQuery) Offset(offset int) *RedeemCodeQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *RedeemCodeQuery) Unique(unique bool) *RedeemCodeQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *RedeemCodeQuery) Order(o ...redeemcode.OrderOption) *RedeemCodeQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. 
+func (_q *RedeemCodeQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.UserTable, redeemcode.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *RedeemCodeQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.GroupTable, redeemcode.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first RedeemCode entity from the query. +// Returns a *NotFoundError when no RedeemCode was found. +func (_q *RedeemCodeQuery) First(ctx context.Context) (*RedeemCode, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{redeemcode.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *RedeemCodeQuery) FirstX(ctx context.Context) *RedeemCode { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first RedeemCode ID from the query. +// Returns a *NotFoundError when no RedeemCode ID was found. +func (_q *RedeemCodeQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{redeemcode.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *RedeemCodeQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single RedeemCode entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one RedeemCode entity is found. +// Returns a *NotFoundError when no RedeemCode entities are found. +func (_q *RedeemCodeQuery) Only(ctx context.Context) (*RedeemCode, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{redeemcode.Label} + default: + return nil, &NotSingularError{redeemcode.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *RedeemCodeQuery) OnlyX(ctx context.Context) *RedeemCode { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only RedeemCode ID in the query. +// Returns a *NotSingularError when more than one RedeemCode ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *RedeemCodeQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{redeemcode.Label} + default: + err = &NotSingularError{redeemcode.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *RedeemCodeQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of RedeemCodes. +func (_q *RedeemCodeQuery) All(ctx context.Context) ([]*RedeemCode, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*RedeemCode, *RedeemCodeQuery]() + return withInterceptors[[]*RedeemCode](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *RedeemCodeQuery) AllX(ctx context.Context) []*RedeemCode { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of RedeemCode IDs. +func (_q *RedeemCodeQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(redeemcode.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *RedeemCodeQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *RedeemCodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*RedeemCodeQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *RedeemCodeQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *RedeemCodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *RedeemCodeQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the RedeemCodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *RedeemCodeQuery) Clone() *RedeemCodeQuery { + if _q == nil { + return nil + } + return &RedeemCodeQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]redeemcode.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.RedeemCode{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *RedeemCodeQuery) WithUser(opts ...func(*UserQuery)) *RedeemCodeQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *RedeemCodeQuery) WithGroup(opts ...func(*GroupQuery)) *RedeemCodeQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.RedeemCode.Query(). +// GroupBy(redeemcode.FieldCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *RedeemCodeQuery) GroupBy(field string, fields ...string) *RedeemCodeGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &RedeemCodeGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = redeemcode.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// } +// +// client.RedeemCode.Query(). +// Select(redeemcode.FieldCode). +// Scan(ctx, &v) +func (_q *RedeemCodeQuery) Select(fields ...string) *RedeemCodeSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &RedeemCodeSelect{RedeemCodeQuery: _q} + sbuild.label = redeemcode.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RedeemCodeSelect configured with the given aggregations. 
+func (_q *RedeemCodeQuery) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *RedeemCodeQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !redeemcode.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *RedeemCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RedeemCode, error) { + var ( + nodes = []*RedeemCode{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*RedeemCode).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &RedeemCode{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *RedeemCode, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *RedeemCode, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *RedeemCodeQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*RedeemCode, 
init func(*RedeemCode), assign func(*RedeemCode, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*RedeemCode) + for i := range nodes { + if nodes[i].UsedBy == nil { + continue + } + fk := *nodes[i].UsedBy + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "used_by" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *RedeemCodeQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*RedeemCode, init func(*RedeemCode), assign func(*RedeemCode, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*RedeemCode) + for i := range nodes { + if nodes[i].GroupID == nil { + continue + } + fk := *nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *RedeemCodeQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, 
field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID) + for i := range fields { + if fields[i] != redeemcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(redeemcode.FieldUsedBy) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(redeemcode.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(redeemcode.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = redeemcode.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// RedeemCodeGroupBy is the group-by builder for RedeemCode entities. +type RedeemCodeGroupBy struct { + selector + build *RedeemCodeQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *RedeemCodeGroupBy) Aggregate(fns ...AggregateFunc) *RedeemCodeGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *RedeemCodeGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *RedeemCodeGroupBy) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// RedeemCodeSelect is the builder for selecting fields of RedeemCode entities. +type RedeemCodeSelect struct { + *RedeemCodeQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
+func (_s *RedeemCodeSelect) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *RedeemCodeSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeSelect](ctx, _s.RedeemCodeQuery, _s, _s.inters, v) +} + +func (_s *RedeemCodeSelect) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/redeemcode_update.go b/backend/ent/redeemcode_update.go new file mode 100644 index 00000000..0f05e06d --- /dev/null +++ b/backend/ent/redeemcode_update.go @@ -0,0 +1,806 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeUpdate is the builder for updating RedeemCode entities. +type RedeemCodeUpdate struct { + config + hooks []Hook + mutation *RedeemCodeMutation +} + +// Where appends a list predicates to the RedeemCodeUpdate builder. 
+func (_u *RedeemCodeUpdate) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetCode sets the "code" field. +func (_u *RedeemCodeUpdate) SetCode(v string) *RedeemCodeUpdate { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableCode(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *RedeemCodeUpdate) SetType(v string) *RedeemCodeUpdate { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableType(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *RedeemCodeUpdate) SetValue(v float64) *RedeemCodeUpdate { + _u.mutation.ResetValue() + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableValue(v *float64) *RedeemCodeUpdate { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// AddValue adds value to the "value" field. +func (_u *RedeemCodeUpdate) AddValue(v float64) *RedeemCodeUpdate { + _u.mutation.AddValue(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *RedeemCodeUpdate) SetStatus(v string) *RedeemCodeUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableStatus(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsedBy sets the "used_by" field. +func (_u *RedeemCodeUpdate) SetUsedBy(v int64) *RedeemCodeUpdate { + _u.mutation.SetUsedBy(v) + return _u +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. 
+func (_u *RedeemCodeUpdate) SetNillableUsedBy(v *int64) *RedeemCodeUpdate { + if v != nil { + _u.SetUsedBy(*v) + } + return _u +} + +// ClearUsedBy clears the value of the "used_by" field. +func (_u *RedeemCodeUpdate) ClearUsedBy() *RedeemCodeUpdate { + _u.mutation.ClearUsedBy() + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *RedeemCodeUpdate) SetUsedAt(v time.Time) *RedeemCodeUpdate { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdate { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (_u *RedeemCodeUpdate) ClearUsedAt() *RedeemCodeUpdate { + _u.mutation.ClearUsedAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *RedeemCodeUpdate) SetNotes(v string) *RedeemCodeUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableNotes(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *RedeemCodeUpdate) ClearNotes() *RedeemCodeUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *RedeemCodeUpdate) SetGroupID(v int64) *RedeemCodeUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableGroupID(v *int64) *RedeemCodeUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *RedeemCodeUpdate) ClearGroupID() *RedeemCodeUpdate { + _u.mutation.ClearGroupID() + return _u +} + +// SetValidityDays sets the "validity_days" field. 
+func (_u *RedeemCodeUpdate) SetValidityDays(v int) *RedeemCodeUpdate { + _u.mutation.ResetValidityDays() + _u.mutation.SetValidityDays(v) + return _u +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableValidityDays(v *int) *RedeemCodeUpdate { + if v != nil { + _u.SetValidityDays(*v) + } + return _u +} + +// AddValidityDays adds value to the "validity_days" field. +func (_u *RedeemCodeUpdate) AddValidityDays(v int) *RedeemCodeUpdate { + _u.mutation.AddValidityDays(v) + return _u +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (_u *RedeemCodeUpdate) SetUserID(id int64) *RedeemCodeUpdate { + _u.mutation.SetUserID(id) + return _u +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableUserID(id *int64) *RedeemCodeUpdate { + if id != nil { + _u = _u.SetUserID(*id) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *RedeemCodeUpdate) SetUser(v *User) *RedeemCodeUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *RedeemCodeUpdate) SetGroup(v *Group) *RedeemCodeUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_u *RedeemCodeUpdate) Mutation() *RedeemCodeMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *RedeemCodeUpdate) ClearUser() *RedeemCodeUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *RedeemCodeUpdate) ClearGroup() *RedeemCodeUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (_u *RedeemCodeUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *RedeemCodeUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *RedeemCodeUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *RedeemCodeUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *RedeemCodeUpdate) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + return nil +} + +func (_u *RedeemCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + } + if 
value, ok := _u.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedValue(); ok { + _spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.UsedAtCleared() { + _spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(redeemcode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedValidityDays(); ok { + _spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := 
&sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{redeemcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// RedeemCodeUpdateOne is the builder for updating a single RedeemCode entity. +type RedeemCodeUpdateOne struct { + config + fields []string + hooks []Hook + mutation *RedeemCodeMutation +} + +// SetCode sets the "code" field. +func (_u *RedeemCodeUpdateOne) SetCode(v string) *RedeemCodeUpdateOne { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableCode(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *RedeemCodeUpdateOne) SetType(v string) *RedeemCodeUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. 
+func (_u *RedeemCodeUpdateOne) SetNillableType(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *RedeemCodeUpdateOne) SetValue(v float64) *RedeemCodeUpdateOne { + _u.mutation.ResetValue() + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableValue(v *float64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// AddValue adds value to the "value" field. +func (_u *RedeemCodeUpdateOne) AddValue(v float64) *RedeemCodeUpdateOne { + _u.mutation.AddValue(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *RedeemCodeUpdateOne) SetStatus(v string) *RedeemCodeUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableStatus(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsedBy sets the "used_by" field. +func (_u *RedeemCodeUpdateOne) SetUsedBy(v int64) *RedeemCodeUpdateOne { + _u.mutation.SetUsedBy(v) + return _u +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableUsedBy(v *int64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetUsedBy(*v) + } + return _u +} + +// ClearUsedBy clears the value of the "used_by" field. +func (_u *RedeemCodeUpdateOne) ClearUsedBy() *RedeemCodeUpdateOne { + _u.mutation.ClearUsedBy() + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *RedeemCodeUpdateOne) SetUsedAt(v time.Time) *RedeemCodeUpdateOne { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. 
+func (_u *RedeemCodeUpdateOne) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdateOne { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (_u *RedeemCodeUpdateOne) ClearUsedAt() *RedeemCodeUpdateOne { + _u.mutation.ClearUsedAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *RedeemCodeUpdateOne) SetNotes(v string) *RedeemCodeUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableNotes(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *RedeemCodeUpdateOne) ClearNotes() *RedeemCodeUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *RedeemCodeUpdateOne) SetGroupID(v int64) *RedeemCodeUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableGroupID(v *int64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *RedeemCodeUpdateOne) ClearGroupID() *RedeemCodeUpdateOne { + _u.mutation.ClearGroupID() + return _u +} + +// SetValidityDays sets the "validity_days" field. +func (_u *RedeemCodeUpdateOne) SetValidityDays(v int) *RedeemCodeUpdateOne { + _u.mutation.ResetValidityDays() + _u.mutation.SetValidityDays(v) + return _u +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableValidityDays(v *int) *RedeemCodeUpdateOne { + if v != nil { + _u.SetValidityDays(*v) + } + return _u +} + +// AddValidityDays adds value to the "validity_days" field. 
+func (_u *RedeemCodeUpdateOne) AddValidityDays(v int) *RedeemCodeUpdateOne { + _u.mutation.AddValidityDays(v) + return _u +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (_u *RedeemCodeUpdateOne) SetUserID(id int64) *RedeemCodeUpdateOne { + _u.mutation.SetUserID(id) + return _u +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableUserID(id *int64) *RedeemCodeUpdateOne { + if id != nil { + _u = _u.SetUserID(*id) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *RedeemCodeUpdateOne) SetUser(v *User) *RedeemCodeUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *RedeemCodeUpdateOne) SetGroup(v *Group) *RedeemCodeUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_u *RedeemCodeUpdateOne) Mutation() *RedeemCodeMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *RedeemCodeUpdateOne) ClearUser() *RedeemCodeUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *RedeemCodeUpdateOne) ClearGroup() *RedeemCodeUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the RedeemCodeUpdate builder. +func (_u *RedeemCodeUpdateOne) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *RedeemCodeUpdateOne) Select(field string, fields ...string) *RedeemCodeUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated RedeemCode entity. 
+func (_u *RedeemCodeUpdateOne) Save(ctx context.Context) (*RedeemCode, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *RedeemCodeUpdateOne) SaveX(ctx context.Context) *RedeemCode { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *RedeemCodeUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *RedeemCodeUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *RedeemCodeUpdateOne) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + return nil +} + +func (_u *RedeemCodeUpdateOne) sqlSave(ctx context.Context) (_node *RedeemCode, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "RedeemCode.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + 
_spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID) + for _, f := range fields { + if !redeemcode.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != redeemcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedValue(); ok { + _spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.UsedAtCleared() { + _spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(redeemcode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedValidityDays(); ok { + _spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &RedeemCode{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{redeemcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/runtime.go b/backend/ent/runtime.go new file mode 100644 index 00000000..ee3195e2 --- /dev/null +++ 
b/backend/ent/runtime.go @@ -0,0 +1,5 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +// The schema-stitching logic is generated in github.com/Wei-Shaw/sub2api/ent/runtime/runtime.go diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go new file mode 100644 index 00000000..ef5e6bec --- /dev/null +++ b/backend/ent/runtime/runtime.go @@ -0,0 +1,562 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/schema" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + accountMixin := schema.Account{}.Mixin() + accountMixinHooks1 := accountMixin[1].Hooks() + account.Hooks[0] = accountMixinHooks1[0] + accountMixinInters1 := accountMixin[1].Interceptors() + account.Interceptors[0] = accountMixinInters1[0] + accountMixinFields0 := accountMixin[0].Fields() + _ = accountMixinFields0 + accountFields := schema.Account{}.Fields() + _ = accountFields + // accountDescCreatedAt is the schema descriptor for created_at field. + accountDescCreatedAt := accountMixinFields0[0].Descriptor() + // account.DefaultCreatedAt holds the default value on creation for the created_at field. + account.DefaultCreatedAt = accountDescCreatedAt.Default.(func() time.Time) + // accountDescUpdatedAt is the schema descriptor for updated_at field. 
+ accountDescUpdatedAt := accountMixinFields0[1].Descriptor() + // account.DefaultUpdatedAt holds the default value on creation for the updated_at field. + account.DefaultUpdatedAt = accountDescUpdatedAt.Default.(func() time.Time) + // account.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + account.UpdateDefaultUpdatedAt = accountDescUpdatedAt.UpdateDefault.(func() time.Time) + // accountDescName is the schema descriptor for name field. + accountDescName := accountFields[0].Descriptor() + // account.NameValidator is a validator for the "name" field. It is called by the builders before save. + account.NameValidator = func() func(string) error { + validators := accountDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // accountDescPlatform is the schema descriptor for platform field. + accountDescPlatform := accountFields[1].Descriptor() + // account.PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + account.PlatformValidator = func() func(string) error { + validators := accountDescPlatform.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(platform string) error { + for _, fn := range fns { + if err := fn(platform); err != nil { + return err + } + } + return nil + } + }() + // accountDescType is the schema descriptor for type field. + accountDescType := accountFields[2].Descriptor() + // account.TypeValidator is a validator for the "type" field. It is called by the builders before save. 
+ account.TypeValidator = func() func(string) error { + validators := accountDescType.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_type string) error { + for _, fn := range fns { + if err := fn(_type); err != nil { + return err + } + } + return nil + } + }() + // accountDescCredentials is the schema descriptor for credentials field. + accountDescCredentials := accountFields[3].Descriptor() + // account.DefaultCredentials holds the default value on creation for the credentials field. + account.DefaultCredentials = accountDescCredentials.Default.(func() map[string]interface{}) + // accountDescExtra is the schema descriptor for extra field. + accountDescExtra := accountFields[4].Descriptor() + // account.DefaultExtra holds the default value on creation for the extra field. + account.DefaultExtra = accountDescExtra.Default.(func() map[string]interface{}) + // accountDescConcurrency is the schema descriptor for concurrency field. + accountDescConcurrency := accountFields[6].Descriptor() + // account.DefaultConcurrency holds the default value on creation for the concurrency field. + account.DefaultConcurrency = accountDescConcurrency.Default.(int) + // accountDescPriority is the schema descriptor for priority field. + accountDescPriority := accountFields[7].Descriptor() + // account.DefaultPriority holds the default value on creation for the priority field. + account.DefaultPriority = accountDescPriority.Default.(int) + // accountDescStatus is the schema descriptor for status field. + accountDescStatus := accountFields[8].Descriptor() + // account.DefaultStatus holds the default value on creation for the status field. + account.DefaultStatus = accountDescStatus.Default.(string) + // account.StatusValidator is a validator for the "status" field. It is called by the builders before save. 
+ account.StatusValidator = accountDescStatus.Validators[0].(func(string) error) + // accountDescSchedulable is the schema descriptor for schedulable field. + accountDescSchedulable := accountFields[11].Descriptor() + // account.DefaultSchedulable holds the default value on creation for the schedulable field. + account.DefaultSchedulable = accountDescSchedulable.Default.(bool) + // accountDescSessionWindowStatus is the schema descriptor for session_window_status field. + accountDescSessionWindowStatus := accountFields[17].Descriptor() + // account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save. + account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error) + accountgroupFields := schema.AccountGroup{}.Fields() + _ = accountgroupFields + // accountgroupDescPriority is the schema descriptor for priority field. + accountgroupDescPriority := accountgroupFields[2].Descriptor() + // accountgroup.DefaultPriority holds the default value on creation for the priority field. + accountgroup.DefaultPriority = accountgroupDescPriority.Default.(int) + // accountgroupDescCreatedAt is the schema descriptor for created_at field. + accountgroupDescCreatedAt := accountgroupFields[3].Descriptor() + // accountgroup.DefaultCreatedAt holds the default value on creation for the created_at field. + accountgroup.DefaultCreatedAt = accountgroupDescCreatedAt.Default.(func() time.Time) + apikeyMixin := schema.ApiKey{}.Mixin() + apikeyMixinHooks1 := apikeyMixin[1].Hooks() + apikey.Hooks[0] = apikeyMixinHooks1[0] + apikeyMixinInters1 := apikeyMixin[1].Interceptors() + apikey.Interceptors[0] = apikeyMixinInters1[0] + apikeyMixinFields0 := apikeyMixin[0].Fields() + _ = apikeyMixinFields0 + apikeyFields := schema.ApiKey{}.Fields() + _ = apikeyFields + // apikeyDescCreatedAt is the schema descriptor for created_at field. 
+ apikeyDescCreatedAt := apikeyMixinFields0[0].Descriptor() + // apikey.DefaultCreatedAt holds the default value on creation for the created_at field. + apikey.DefaultCreatedAt = apikeyDescCreatedAt.Default.(func() time.Time) + // apikeyDescUpdatedAt is the schema descriptor for updated_at field. + apikeyDescUpdatedAt := apikeyMixinFields0[1].Descriptor() + // apikey.DefaultUpdatedAt holds the default value on creation for the updated_at field. + apikey.DefaultUpdatedAt = apikeyDescUpdatedAt.Default.(func() time.Time) + // apikey.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + apikey.UpdateDefaultUpdatedAt = apikeyDescUpdatedAt.UpdateDefault.(func() time.Time) + // apikeyDescKey is the schema descriptor for key field. + apikeyDescKey := apikeyFields[1].Descriptor() + // apikey.KeyValidator is a validator for the "key" field. It is called by the builders before save. + apikey.KeyValidator = func() func(string) error { + validators := apikeyDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // apikeyDescName is the schema descriptor for name field. + apikeyDescName := apikeyFields[2].Descriptor() + // apikey.NameValidator is a validator for the "name" field. It is called by the builders before save. + apikey.NameValidator = func() func(string) error { + validators := apikeyDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // apikeyDescStatus is the schema descriptor for status field. 
+ apikeyDescStatus := apikeyFields[4].Descriptor() + // apikey.DefaultStatus holds the default value on creation for the status field. + apikey.DefaultStatus = apikeyDescStatus.Default.(string) + // apikey.StatusValidator is a validator for the "status" field. It is called by the builders before save. + apikey.StatusValidator = apikeyDescStatus.Validators[0].(func(string) error) + groupMixin := schema.Group{}.Mixin() + groupMixinHooks1 := groupMixin[1].Hooks() + group.Hooks[0] = groupMixinHooks1[0] + groupMixinInters1 := groupMixin[1].Interceptors() + group.Interceptors[0] = groupMixinInters1[0] + groupMixinFields0 := groupMixin[0].Fields() + _ = groupMixinFields0 + groupFields := schema.Group{}.Fields() + _ = groupFields + // groupDescCreatedAt is the schema descriptor for created_at field. + groupDescCreatedAt := groupMixinFields0[0].Descriptor() + // group.DefaultCreatedAt holds the default value on creation for the created_at field. + group.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time) + // groupDescUpdatedAt is the schema descriptor for updated_at field. + groupDescUpdatedAt := groupMixinFields0[1].Descriptor() + // group.DefaultUpdatedAt holds the default value on creation for the updated_at field. + group.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time) + // group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + group.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time) + // groupDescName is the schema descriptor for name field. + groupDescName := groupFields[0].Descriptor() + // group.NameValidator is a validator for the "name" field. It is called by the builders before save. 
+ group.NameValidator = func() func(string) error { + validators := groupDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // groupDescRateMultiplier is the schema descriptor for rate_multiplier field. + groupDescRateMultiplier := groupFields[2].Descriptor() + // group.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. + group.DefaultRateMultiplier = groupDescRateMultiplier.Default.(float64) + // groupDescIsExclusive is the schema descriptor for is_exclusive field. + groupDescIsExclusive := groupFields[3].Descriptor() + // group.DefaultIsExclusive holds the default value on creation for the is_exclusive field. + group.DefaultIsExclusive = groupDescIsExclusive.Default.(bool) + // groupDescStatus is the schema descriptor for status field. + groupDescStatus := groupFields[4].Descriptor() + // group.DefaultStatus holds the default value on creation for the status field. + group.DefaultStatus = groupDescStatus.Default.(string) + // group.StatusValidator is a validator for the "status" field. It is called by the builders before save. + group.StatusValidator = groupDescStatus.Validators[0].(func(string) error) + // groupDescPlatform is the schema descriptor for platform field. + groupDescPlatform := groupFields[5].Descriptor() + // group.DefaultPlatform holds the default value on creation for the platform field. + group.DefaultPlatform = groupDescPlatform.Default.(string) + // group.PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + group.PlatformValidator = groupDescPlatform.Validators[0].(func(string) error) + // groupDescSubscriptionType is the schema descriptor for subscription_type field. 
+ groupDescSubscriptionType := groupFields[6].Descriptor() + // group.DefaultSubscriptionType holds the default value on creation for the subscription_type field. + group.DefaultSubscriptionType = groupDescSubscriptionType.Default.(string) + // group.SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save. + group.SubscriptionTypeValidator = groupDescSubscriptionType.Validators[0].(func(string) error) + proxyMixin := schema.Proxy{}.Mixin() + proxyMixinHooks1 := proxyMixin[1].Hooks() + proxy.Hooks[0] = proxyMixinHooks1[0] + proxyMixinInters1 := proxyMixin[1].Interceptors() + proxy.Interceptors[0] = proxyMixinInters1[0] + proxyMixinFields0 := proxyMixin[0].Fields() + _ = proxyMixinFields0 + proxyFields := schema.Proxy{}.Fields() + _ = proxyFields + // proxyDescCreatedAt is the schema descriptor for created_at field. + proxyDescCreatedAt := proxyMixinFields0[0].Descriptor() + // proxy.DefaultCreatedAt holds the default value on creation for the created_at field. + proxy.DefaultCreatedAt = proxyDescCreatedAt.Default.(func() time.Time) + // proxyDescUpdatedAt is the schema descriptor for updated_at field. + proxyDescUpdatedAt := proxyMixinFields0[1].Descriptor() + // proxy.DefaultUpdatedAt holds the default value on creation for the updated_at field. + proxy.DefaultUpdatedAt = proxyDescUpdatedAt.Default.(func() time.Time) + // proxy.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + proxy.UpdateDefaultUpdatedAt = proxyDescUpdatedAt.UpdateDefault.(func() time.Time) + // proxyDescName is the schema descriptor for name field. + proxyDescName := proxyFields[0].Descriptor() + // proxy.NameValidator is a validator for the "name" field. It is called by the builders before save. 
+ proxy.NameValidator = func() func(string) error { + validators := proxyDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // proxyDescProtocol is the schema descriptor for protocol field. + proxyDescProtocol := proxyFields[1].Descriptor() + // proxy.ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save. + proxy.ProtocolValidator = func() func(string) error { + validators := proxyDescProtocol.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(protocol string) error { + for _, fn := range fns { + if err := fn(protocol); err != nil { + return err + } + } + return nil + } + }() + // proxyDescHost is the schema descriptor for host field. + proxyDescHost := proxyFields[2].Descriptor() + // proxy.HostValidator is a validator for the "host" field. It is called by the builders before save. + proxy.HostValidator = func() func(string) error { + validators := proxyDescHost.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(host string) error { + for _, fn := range fns { + if err := fn(host); err != nil { + return err + } + } + return nil + } + }() + // proxyDescUsername is the schema descriptor for username field. + proxyDescUsername := proxyFields[4].Descriptor() + // proxy.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + proxy.UsernameValidator = proxyDescUsername.Validators[0].(func(string) error) + // proxyDescPassword is the schema descriptor for password field. 
+ proxyDescPassword := proxyFields[5].Descriptor() + // proxy.PasswordValidator is a validator for the "password" field. It is called by the builders before save. + proxy.PasswordValidator = proxyDescPassword.Validators[0].(func(string) error) + // proxyDescStatus is the schema descriptor for status field. + proxyDescStatus := proxyFields[6].Descriptor() + // proxy.DefaultStatus holds the default value on creation for the status field. + proxy.DefaultStatus = proxyDescStatus.Default.(string) + // proxy.StatusValidator is a validator for the "status" field. It is called by the builders before save. + proxy.StatusValidator = proxyDescStatus.Validators[0].(func(string) error) + redeemcodeFields := schema.RedeemCode{}.Fields() + _ = redeemcodeFields + // redeemcodeDescCode is the schema descriptor for code field. + redeemcodeDescCode := redeemcodeFields[0].Descriptor() + // redeemcode.CodeValidator is a validator for the "code" field. It is called by the builders before save. + redeemcode.CodeValidator = func() func(string) error { + validators := redeemcodeDescCode.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(code string) error { + for _, fn := range fns { + if err := fn(code); err != nil { + return err + } + } + return nil + } + }() + // redeemcodeDescType is the schema descriptor for type field. + redeemcodeDescType := redeemcodeFields[1].Descriptor() + // redeemcode.DefaultType holds the default value on creation for the type field. + redeemcode.DefaultType = redeemcodeDescType.Default.(string) + // redeemcode.TypeValidator is a validator for the "type" field. It is called by the builders before save. + redeemcode.TypeValidator = redeemcodeDescType.Validators[0].(func(string) error) + // redeemcodeDescValue is the schema descriptor for value field. 
+ redeemcodeDescValue := redeemcodeFields[2].Descriptor() + // redeemcode.DefaultValue holds the default value on creation for the value field. + redeemcode.DefaultValue = redeemcodeDescValue.Default.(float64) + // redeemcodeDescStatus is the schema descriptor for status field. + redeemcodeDescStatus := redeemcodeFields[3].Descriptor() + // redeemcode.DefaultStatus holds the default value on creation for the status field. + redeemcode.DefaultStatus = redeemcodeDescStatus.Default.(string) + // redeemcode.StatusValidator is a validator for the "status" field. It is called by the builders before save. + redeemcode.StatusValidator = redeemcodeDescStatus.Validators[0].(func(string) error) + // redeemcodeDescCreatedAt is the schema descriptor for created_at field. + redeemcodeDescCreatedAt := redeemcodeFields[7].Descriptor() + // redeemcode.DefaultCreatedAt holds the default value on creation for the created_at field. + redeemcode.DefaultCreatedAt = redeemcodeDescCreatedAt.Default.(func() time.Time) + // redeemcodeDescValidityDays is the schema descriptor for validity_days field. + redeemcodeDescValidityDays := redeemcodeFields[9].Descriptor() + // redeemcode.DefaultValidityDays holds the default value on creation for the validity_days field. + redeemcode.DefaultValidityDays = redeemcodeDescValidityDays.Default.(int) + settingFields := schema.Setting{}.Fields() + _ = settingFields + // settingDescKey is the schema descriptor for key field. + settingDescKey := settingFields[0].Descriptor() + // setting.KeyValidator is a validator for the "key" field. It is called by the builders before save. 
+ setting.KeyValidator = func() func(string) error { + validators := settingDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // settingDescValue is the schema descriptor for value field. + settingDescValue := settingFields[1].Descriptor() + // setting.ValueValidator is a validator for the "value" field. It is called by the builders before save. + setting.ValueValidator = settingDescValue.Validators[0].(func(string) error) + // settingDescUpdatedAt is the schema descriptor for updated_at field. + settingDescUpdatedAt := settingFields[2].Descriptor() + // setting.DefaultUpdatedAt holds the default value on creation for the updated_at field. + setting.DefaultUpdatedAt = settingDescUpdatedAt.Default.(func() time.Time) + // setting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + setting.UpdateDefaultUpdatedAt = settingDescUpdatedAt.UpdateDefault.(func() time.Time) + userMixin := schema.User{}.Mixin() + userMixinHooks1 := userMixin[1].Hooks() + user.Hooks[0] = userMixinHooks1[0] + userMixinInters1 := userMixin[1].Interceptors() + user.Interceptors[0] = userMixinInters1[0] + userMixinFields0 := userMixin[0].Fields() + _ = userMixinFields0 + userFields := schema.User{}.Fields() + _ = userFields + // userDescCreatedAt is the schema descriptor for created_at field. + userDescCreatedAt := userMixinFields0[0].Descriptor() + // user.DefaultCreatedAt holds the default value on creation for the created_at field. + user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time) + // userDescUpdatedAt is the schema descriptor for updated_at field. + userDescUpdatedAt := userMixinFields0[1].Descriptor() + // user.DefaultUpdatedAt holds the default value on creation for the updated_at field. 
+ user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time) + // user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + user.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time) + // userDescEmail is the schema descriptor for email field. + userDescEmail := userFields[0].Descriptor() + // user.EmailValidator is a validator for the "email" field. It is called by the builders before save. + user.EmailValidator = func() func(string) error { + validators := userDescEmail.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(email string) error { + for _, fn := range fns { + if err := fn(email); err != nil { + return err + } + } + return nil + } + }() + // userDescPasswordHash is the schema descriptor for password_hash field. + userDescPasswordHash := userFields[1].Descriptor() + // user.PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + user.PasswordHashValidator = func() func(string) error { + validators := userDescPasswordHash.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(password_hash string) error { + for _, fn := range fns { + if err := fn(password_hash); err != nil { + return err + } + } + return nil + } + }() + // userDescRole is the schema descriptor for role field. + userDescRole := userFields[2].Descriptor() + // user.DefaultRole holds the default value on creation for the role field. + user.DefaultRole = userDescRole.Default.(string) + // user.RoleValidator is a validator for the "role" field. It is called by the builders before save. + user.RoleValidator = userDescRole.Validators[0].(func(string) error) + // userDescBalance is the schema descriptor for balance field. 
+ userDescBalance := userFields[3].Descriptor() + // user.DefaultBalance holds the default value on creation for the balance field. + user.DefaultBalance = userDescBalance.Default.(float64) + // userDescConcurrency is the schema descriptor for concurrency field. + userDescConcurrency := userFields[4].Descriptor() + // user.DefaultConcurrency holds the default value on creation for the concurrency field. + user.DefaultConcurrency = userDescConcurrency.Default.(int) + // userDescStatus is the schema descriptor for status field. + userDescStatus := userFields[5].Descriptor() + // user.DefaultStatus holds the default value on creation for the status field. + user.DefaultStatus = userDescStatus.Default.(string) + // user.StatusValidator is a validator for the "status" field. It is called by the builders before save. + user.StatusValidator = userDescStatus.Validators[0].(func(string) error) + // userDescUsername is the schema descriptor for username field. + userDescUsername := userFields[6].Descriptor() + // user.DefaultUsername holds the default value on creation for the username field. + user.DefaultUsername = userDescUsername.Default.(string) + // user.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + user.UsernameValidator = userDescUsername.Validators[0].(func(string) error) + // userDescWechat is the schema descriptor for wechat field. + userDescWechat := userFields[7].Descriptor() + // user.DefaultWechat holds the default value on creation for the wechat field. + user.DefaultWechat = userDescWechat.Default.(string) + // user.WechatValidator is a validator for the "wechat" field. It is called by the builders before save. + user.WechatValidator = userDescWechat.Validators[0].(func(string) error) + // userDescNotes is the schema descriptor for notes field. + userDescNotes := userFields[8].Descriptor() + // user.DefaultNotes holds the default value on creation for the notes field. 
+ user.DefaultNotes = userDescNotes.Default.(string) + userallowedgroupFields := schema.UserAllowedGroup{}.Fields() + _ = userallowedgroupFields + // userallowedgroupDescCreatedAt is the schema descriptor for created_at field. + userallowedgroupDescCreatedAt := userallowedgroupFields[2].Descriptor() + // userallowedgroup.DefaultCreatedAt holds the default value on creation for the created_at field. + userallowedgroup.DefaultCreatedAt = userallowedgroupDescCreatedAt.Default.(func() time.Time) + usersubscriptionMixin := schema.UserSubscription{}.Mixin() + usersubscriptionMixinFields0 := usersubscriptionMixin[0].Fields() + _ = usersubscriptionMixinFields0 + usersubscriptionFields := schema.UserSubscription{}.Fields() + _ = usersubscriptionFields + // usersubscriptionDescCreatedAt is the schema descriptor for created_at field. + usersubscriptionDescCreatedAt := usersubscriptionMixinFields0[0].Descriptor() + // usersubscription.DefaultCreatedAt holds the default value on creation for the created_at field. + usersubscription.DefaultCreatedAt = usersubscriptionDescCreatedAt.Default.(func() time.Time) + // usersubscriptionDescUpdatedAt is the schema descriptor for updated_at field. + usersubscriptionDescUpdatedAt := usersubscriptionMixinFields0[1].Descriptor() + // usersubscription.DefaultUpdatedAt holds the default value on creation for the updated_at field. + usersubscription.DefaultUpdatedAt = usersubscriptionDescUpdatedAt.Default.(func() time.Time) + // usersubscription.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + usersubscription.UpdateDefaultUpdatedAt = usersubscriptionDescUpdatedAt.UpdateDefault.(func() time.Time) + // usersubscriptionDescStatus is the schema descriptor for status field. + usersubscriptionDescStatus := usersubscriptionFields[4].Descriptor() + // usersubscription.DefaultStatus holds the default value on creation for the status field. 
+ usersubscription.DefaultStatus = usersubscriptionDescStatus.Default.(string) + // usersubscription.StatusValidator is a validator for the "status" field. It is called by the builders before save. + usersubscription.StatusValidator = usersubscriptionDescStatus.Validators[0].(func(string) error) + // usersubscriptionDescDailyUsageUsd is the schema descriptor for daily_usage_usd field. + usersubscriptionDescDailyUsageUsd := usersubscriptionFields[8].Descriptor() + // usersubscription.DefaultDailyUsageUsd holds the default value on creation for the daily_usage_usd field. + usersubscription.DefaultDailyUsageUsd = usersubscriptionDescDailyUsageUsd.Default.(float64) + // usersubscriptionDescWeeklyUsageUsd is the schema descriptor for weekly_usage_usd field. + usersubscriptionDescWeeklyUsageUsd := usersubscriptionFields[9].Descriptor() + // usersubscription.DefaultWeeklyUsageUsd holds the default value on creation for the weekly_usage_usd field. + usersubscription.DefaultWeeklyUsageUsd = usersubscriptionDescWeeklyUsageUsd.Default.(float64) + // usersubscriptionDescMonthlyUsageUsd is the schema descriptor for monthly_usage_usd field. + usersubscriptionDescMonthlyUsageUsd := usersubscriptionFields[10].Descriptor() + // usersubscription.DefaultMonthlyUsageUsd holds the default value on creation for the monthly_usage_usd field. + usersubscription.DefaultMonthlyUsageUsd = usersubscriptionDescMonthlyUsageUsd.Default.(float64) + // usersubscriptionDescAssignedAt is the schema descriptor for assigned_at field. + usersubscriptionDescAssignedAt := usersubscriptionFields[12].Descriptor() + // usersubscription.DefaultAssignedAt holds the default value on creation for the assigned_at field. + usersubscription.DefaultAssignedAt = usersubscriptionDescAssignedAt.Default.(func() time.Time) +} + +const ( + Version = "v0.14.5" // Version of ent codegen. + Sum = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen. 
+) diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go new file mode 100644 index 00000000..bd929693 --- /dev/null +++ b/backend/ent/schema/account.go @@ -0,0 +1,190 @@ +// Package schema 定义 Ent ORM 的数据库 schema。 +// 每个文件对应一个数据库实体(表),定义其字段、边(关联)和索引。 +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Account 定义 AI API 账户实体的 schema。 +// +// 账户是系统的核心资源,代表一个可用于调用 AI API 的凭证。 +// 例如:一个 Claude API 账户、一个 Gemini OAuth 账户等。 +// +// 主要功能: +// - 存储不同平台(Claude、Gemini、OpenAI 等)的 API 凭证 +// - 支持多种认证类型(api_key、oauth、cookie 等) +// - 管理账户的调度状态(可调度、速率限制、过载等) +// - 通过分组机制实现账户的灵活分配 +type Account struct { + ent.Schema +} + +// Annotations 返回 schema 的注解配置。 +// 这里指定数据库表名为 "accounts"。 +func (Account) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "accounts"}, + } +} + +// Mixin 返回该 schema 使用的混入组件。 +// - TimeMixin: 自动管理 created_at 和 updated_at 时间戳 +// - SoftDeleteMixin: 提供软删除功能(deleted_at) +func (Account) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +// Fields 定义账户实体的所有字段。 +func (Account) Fields() []ent.Field { + return []ent.Field{ + // name: 账户显示名称,用于在界面中标识账户 + field.String("name"). + MaxLen(100). + NotEmpty(), + + // platform: 所属平台,如 "claude", "gemini", "openai" 等 + field.String("platform"). + MaxLen(50). + NotEmpty(), + + // type: 认证类型,如 "api_key", "oauth", "cookie" 等 + // 不同类型决定了 credentials 中存储的数据结构 + field.String("type"). + MaxLen(20). 
+ NotEmpty(), + + // credentials: 认证凭证,以 JSONB 格式存储 + // 结构取决于 type 字段: + // - api_key: {"api_key": "sk-xxx"} + // - oauth: {"access_token": "...", "refresh_token": "...", "expires_at": "..."} + // - cookie: {"session_key": "..."} + field.JSON("credentials", map[string]any{}). + Default(func() map[string]any { return map[string]any{} }). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // extra: 扩展数据,存储平台特定的额外信息 + // 如 CRS 账户的 crs_account_id、组织信息等 + field.JSON("extra", map[string]any{}). + Default(func() map[string]any { return map[string]any{} }). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // proxy_id: 关联的代理配置 ID(可选) + // 用于需要通过特定代理访问 API 的场景 + field.Int64("proxy_id"). + Optional(). + Nillable(), + + // concurrency: 账户最大并发请求数 + // 用于限制同一时间对该账户发起的请求数量 + field.Int("concurrency"). + Default(3), + + // priority: 账户优先级,数值越小优先级越高 + // 调度器会优先使用高优先级的账户 + field.Int("priority"). + Default(50), + + // status: 账户状态,如 "active", "error", "disabled" + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + + // error_message: 错误信息,记录账户异常时的详细信息 + field.String("error_message"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + + // last_used_at: 最后使用时间,用于统计和调度 + field.Time("last_used_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // ========== 调度和速率限制相关字段 ========== + // 这些字段在 migrations/005_schema_parity.sql 中添加 + + // schedulable: 是否可被调度器选中 + // false 表示账户暂时不参与请求分配(如正在刷新 token) + field.Bool("schedulable"). + Default(true), + + // rate_limited_at: 触发速率限制的时间 + // 当收到 429 错误时记录 + field.Time("rate_limited_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // rate_limit_reset_at: 速率限制预计解除的时间 + // 调度器会在此时间之前避免使用该账户 + field.Time("rate_limit_reset_at"). + Optional(). + Nillable(). 
+ SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // overload_until: 过载状态解除时间 + // 当收到 529 错误(API 过载)时设置 + field.Time("overload_until"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // session_window_*: 会话窗口相关字段 + // 用于管理某些需要会话时间窗口的 API(如 Claude Pro) + field.Time("session_window_start"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("session_window_end"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.String("session_window_status"). + Optional(). + Nillable(). + MaxLen(20), + } +} + +// Edges 定义账户实体的关联关系。 +func (Account) Edges() []ent.Edge { + return []ent.Edge{ + // groups: 账户所属的分组(多对多关系) + // 通过 account_groups 中间表实现 + // 一个账户可以属于多个分组,一个分组可以包含多个账户 + edge.To("groups", Group.Type). + Through("account_groups", AccountGroup.Type), + } +} + +// Indexes 定义数据库索引,优化查询性能。 +// 每个索引对应一个常用的查询条件。 +func (Account) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("platform"), // 按平台筛选 + index.Fields("type"), // 按认证类型筛选 + index.Fields("status"), // 按状态筛选 + index.Fields("proxy_id"), // 按代理筛选 + index.Fields("priority"), // 按优先级排序 + index.Fields("last_used_at"), // 按最后使用时间排序 + index.Fields("schedulable"), // 筛选可调度账户 + index.Fields("rate_limited_at"), // 筛选速率限制账户 + index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间 + index.Fields("overload_until"), // 筛选过载账户 + index.Fields("deleted_at"), // 软删除查询优化 + } +} diff --git a/backend/ent/schema/account_group.go b/backend/ent/schema/account_group.go new file mode 100644 index 00000000..66729752 --- /dev/null +++ b/backend/ent/schema/account_group.go @@ -0,0 +1,58 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// AccountGroup holds the edge schema definition for the account_groups 
relationship. +// It stores extra fields (priority, created_at) and uses a composite primary key. +type AccountGroup struct { + ent.Schema +} + +func (AccountGroup) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "account_groups"}, + // Composite primary key: (account_id, group_id). + field.ID("account_id", "group_id"), + } +} + +func (AccountGroup) Fields() []ent.Field { + return []ent.Field{ + field.Int64("account_id"), + field.Int64("group_id"), + field.Int("priority"). + Default(50), + field.Time("created_at"). + Immutable(). + Default(time.Now), + } +} + +func (AccountGroup) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("account", Account.Type). + Unique(). + Required(). + Field("account_id"), + edge.To("group", Group.Type). + Unique(). + Required(). + Field("group_id"), + } +} + +func (AccountGroup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("group_id"), + index.Fields("priority"), + } +} diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go new file mode 100644 index 00000000..0f0f830e --- /dev/null +++ b/backend/ent/schema/api_key.go @@ -0,0 +1,74 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// ApiKey holds the schema definition for the ApiKey entity. +type ApiKey struct { + ent.Schema +} + +func (ApiKey) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "api_keys"}, + } +} + +func (ApiKey) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (ApiKey) Fields() []ent.Field { + return []ent.Field{ + field.Int64("user_id"), + field.String("key"). + MaxLen(128). + NotEmpty(). + Unique(), + field.String("name"). + MaxLen(100). 
+ NotEmpty(), + field.Int64("group_id"). + Optional(). + Nillable(), + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + } +} + +func (ApiKey) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type). + Ref("api_keys"). + Field("user_id"). + Unique(). + Required(), + edge.From("group", Group.Type). + Ref("api_keys"). + Field("group_id"). + Unique(), + } +} + +func (ApiKey) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("key").Unique(), + index.Fields("user_id"), + index.Fields("group_id"), + index.Fields("status"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go new file mode 100644 index 00000000..2c30c979 --- /dev/null +++ b/backend/ent/schema/group.go @@ -0,0 +1,98 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Group holds the schema definition for the Group entity. +type Group struct { + ent.Schema +} + +func (Group) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "groups"}, + } +} + +func (Group) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (Group) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + MaxLen(100). + NotEmpty(). + Unique(), + field.String("description"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + field.Float("rate_multiplier"). + SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}). + Default(1.0), + field.Bool("is_exclusive"). + Default(false), + field.String("status"). + MaxLen(20). 
+ Default(service.StatusActive), + + // Subscription-related fields (added by migration 003) + field.String("platform"). + MaxLen(50). + Default(service.PlatformAnthropic), + field.String("subscription_type"). + MaxLen(20). + Default(service.SubscriptionTypeStandard), + field.Float("daily_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("weekly_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("monthly_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + } +} + +func (Group) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("api_keys", ApiKey.Type), + edge.To("redeem_codes", RedeemCode.Type), + edge.To("subscriptions", UserSubscription.Type), + edge.From("accounts", Account.Type). + Ref("groups"). + Through("account_groups", AccountGroup.Type), + edge.From("allowed_users", User.Type). + Ref("allowed_groups"). 
+ Through("user_allowed_groups", UserAllowedGroup.Type), + } +} + +func (Group) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("name").Unique(), + index.Fields("status"), + index.Fields("platform"), + index.Fields("subscription_type"), + index.Fields("is_exclusive"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/mixins/soft_delete.go b/backend/ent/schema/mixins/soft_delete.go new file mode 100644 index 00000000..03a3899d --- /dev/null +++ b/backend/ent/schema/mixins/soft_delete.go @@ -0,0 +1,142 @@ +// Package mixins 提供 Ent schema 的可复用混入组件。 +// 包括时间戳混入、软删除混入等通用功能。 +package mixins + +import ( + "context" + "fmt" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// SoftDeleteMixin 实现基于 deleted_at 时间戳的软删除功能。 +// +// 软删除特性: +// - 删除操作不会真正删除数据库记录,而是设置 deleted_at 时间戳 +// - 所有查询默认自动过滤 deleted_at IS NULL,只返回"未删除"的记录 +// - 通过 SkipSoftDelete(ctx) 可以绕过软删除过滤器,查询或真正删除记录 +// +// 实现原理: +// - 使用 Ent 的 Interceptor 拦截所有查询,自动添加 deleted_at IS NULL 条件 +// - 使用 Ent 的 Hook 拦截删除操作,将 DELETE 转换为 UPDATE SET deleted_at = NOW() +// +// 使用示例: +// +// func (User) Mixin() []ent.Mixin { +// return []ent.Mixin{ +// mixins.SoftDeleteMixin{}, +// } +// } +type SoftDeleteMixin struct { + mixin.Schema +} + +// Fields 定义软删除所需的字段。 +// deleted_at 字段: +// - 类型为 TIMESTAMPTZ,精确记录删除时间 +// - Optional 和 Nillable 确保新记录时该字段为 NULL +// - NULL 表示记录未被删除,非 NULL 表示已软删除 +func (SoftDeleteMixin) Fields() []ent.Field { + return []ent.Field{ + field.Time("deleted_at"). + Optional(). + Nillable(). 
+ SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + } +} + +// softDeleteKey 是用于在 context 中标记跳过软删除的键类型。 +// 使用空结构体作为键可以避免与其他包的键冲突。 +type softDeleteKey struct{} + +// SkipSoftDelete 返回一个新的 context,用于跳过软删除的拦截器和变更器。 +// +// 使用场景: +// - 查询已软删除的记录(如管理员查看回收站) +// - 执行真正的物理删除(如彻底清理数据) +// - 恢复软删除的记录 +// +// 示例: +// +// // 查询包含已删除记录的所有用户 +// users, err := client.User.Query().All(mixins.SkipSoftDelete(ctx)) +// +// // 真正删除记录 +// client.User.DeleteOneID(id).Exec(mixins.SkipSoftDelete(ctx)) +func SkipSoftDelete(parent context.Context) context.Context { + return context.WithValue(parent, softDeleteKey{}, true) +} + +// Interceptors 返回查询拦截器列表。 +// 拦截器会自动为所有查询添加 deleted_at IS NULL 条件, +// 确保软删除的记录不会出现在普通查询结果中。 +func (d SoftDeleteMixin) Interceptors() []ent.Interceptor { + return []ent.Interceptor{ + ent.TraverseFunc(func(ctx context.Context, q ent.Query) error { + // 检查是否需要跳过软删除过滤 + if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip { + return nil + } + // 为查询添加 deleted_at IS NULL 条件 + w, ok := q.(interface{ WhereP(...func(*sql.Selector)) }) + if ok { + d.applyPredicate(w) + } + return nil + }), + } +} + +// Hooks 返回变更钩子列表。 +// 钩子会拦截 DELETE 操作,将其转换为 UPDATE SET deleted_at = NOW()。 +// 这样删除操作实际上只是标记记录为已删除,而不是真正删除。 +func (d SoftDeleteMixin) Hooks() []ent.Hook { + return []ent.Hook{ + func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + // 只处理删除操作 + if m.Op() != ent.OpDelete && m.Op() != ent.OpDeleteOne { + return next.Mutate(ctx, m) + } + // 检查是否需要执行真正的删除 + if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip { + return next.Mutate(ctx, m) + } + // 类型断言,获取 mutation 的扩展接口 + mx, ok := m.(interface { + SetOp(ent.Op) + Client() interface { + Mutate(context.Context, ent.Mutation) (ent.Value, error) + } + SetDeletedAt(time.Time) + WhereP(...func(*sql.Selector)) + }) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // 添加软删除过滤条件,确保不会影响已删除的记录 + d.applyPredicate(mx) 
+ // 将 DELETE 操作转换为 UPDATE 操作 + mx.SetOp(ent.OpUpdate) + // 设置删除时间为当前时间 + mx.SetDeletedAt(time.Now()) + return mx.Client().Mutate(ctx, m) + }) + }, + } +} + +// applyPredicate 为查询添加 deleted_at IS NULL 条件。 +// 这是软删除过滤的核心实现。 +func (d SoftDeleteMixin) applyPredicate(w interface{ WhereP(...func(*sql.Selector)) }) { + w.WhereP( + sql.FieldIsNull(d.Fields()[0].Descriptor().Name), + ) +} diff --git a/backend/ent/schema/mixins/time.go b/backend/ent/schema/mixins/time.go new file mode 100644 index 00000000..30ecf273 --- /dev/null +++ b/backend/ent/schema/mixins/time.go @@ -0,0 +1,32 @@ +package mixins + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// TimeMixin provides created_at and updated_at fields compatible with the existing schema. +type TimeMixin struct { + mixin.Schema +} + +func (TimeMixin) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). + SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + } +} diff --git a/backend/ent/schema/proxy.go b/backend/ent/schema/proxy.go new file mode 100644 index 00000000..45608c96 --- /dev/null +++ b/backend/ent/schema/proxy.go @@ -0,0 +1,62 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Proxy holds the schema definition for the Proxy entity. 
+type Proxy struct { + ent.Schema +} + +func (Proxy) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "proxies"}, + } +} + +func (Proxy) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (Proxy) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + MaxLen(100). + NotEmpty(), + field.String("protocol"). + MaxLen(20). + NotEmpty(), + field.String("host"). + MaxLen(255). + NotEmpty(), + field.Int("port"), + field.String("username"). + MaxLen(100). + Optional(). + Nillable(), + field.String("password"). + MaxLen(100). + Optional(). + Nillable(), + field.String("status"). + MaxLen(20). + Default("active"), + } +} + +func (Proxy) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("status"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/redeem_code.go b/backend/ent/schema/redeem_code.go new file mode 100644 index 00000000..0ecb48b7 --- /dev/null +++ b/backend/ent/schema/redeem_code.go @@ -0,0 +1,86 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// RedeemCode holds the schema definition for the RedeemCode entity. +type RedeemCode struct { + ent.Schema +} + +func (RedeemCode) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "redeem_codes"}, + } +} + +func (RedeemCode) Fields() []ent.Field { + return []ent.Field{ + field.String("code"). + MaxLen(32). + NotEmpty(). + Unique(), + field.String("type"). + MaxLen(20). + Default(service.RedeemTypeBalance), + field.Float("value"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Default(0), + field.String("status"). + MaxLen(20). 
+ Default(service.StatusUnused), + field.Int64("used_by"). + Optional(). + Nillable(), + field.Time("used_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.String("notes"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Int64("group_id"). + Optional(). + Nillable(), + field.Int("validity_days"). + Default(30), + } +} + +func (RedeemCode) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type). + Ref("redeem_codes"). + Field("used_by"). + Unique(), + edge.From("group", Group.Type). + Ref("redeem_codes"). + Field("group_id"). + Unique(), + } +} + +func (RedeemCode) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("code").Unique(), + index.Fields("status"), + index.Fields("used_by"), + index.Fields("group_id"), + } +} diff --git a/backend/ent/schema/setting.go b/backend/ent/schema/setting.go new file mode 100644 index 00000000..f31f2a41 --- /dev/null +++ b/backend/ent/schema/setting.go @@ -0,0 +1,49 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Setting holds the schema definition for the Setting entity. +type Setting struct { + ent.Schema +} + +func (Setting) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "settings"}, + } +} + +func (Setting) Fields() []ent.Field { + return []ent.Field{ + field.String("key"). + MaxLen(100). + NotEmpty(). + Unique(), + field.String("value"). + NotEmpty(). + SchemaType(map[string]string{ + dialect.Postgres: "text", + }), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). 
+ SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + } +} + +func (Setting) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("key").Unique(), + } +} diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go new file mode 100644 index 00000000..e76799ed --- /dev/null +++ b/backend/ent/schema/user.go @@ -0,0 +1,85 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// User holds the schema definition for the User entity. +type User struct { + ent.Schema +} + +func (User) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "users"}, + } +} + +func (User) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (User) Fields() []ent.Field { + return []ent.Field{ + field.String("email"). + MaxLen(255). + NotEmpty(). + Unique(), + field.String("password_hash"). + MaxLen(255). + NotEmpty(), + field.String("role"). + MaxLen(20). + Default(service.RoleUser), + field.Float("balance"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Default(0), + field.Int("concurrency"). + Default(5), + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + + // Optional profile fields (added later; default '' in DB migration) + field.String("username"). + MaxLen(100). + Default(""), + field.String("wechat"). + MaxLen(100). + Default(""), + field.String("notes"). + SchemaType(map[string]string{dialect.Postgres: "text"}). 
+ Default(""), + } +} + +func (User) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("api_keys", ApiKey.Type), + edge.To("redeem_codes", RedeemCode.Type), + edge.To("subscriptions", UserSubscription.Type), + edge.To("assigned_subscriptions", UserSubscription.Type), + edge.To("allowed_groups", Group.Type). + Through("user_allowed_groups", UserAllowedGroup.Type), + } +} + +func (User) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("email").Unique(), + index.Fields("status"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/user_allowed_group.go b/backend/ent/schema/user_allowed_group.go new file mode 100644 index 00000000..8fce97c2 --- /dev/null +++ b/backend/ent/schema/user_allowed_group.go @@ -0,0 +1,55 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserAllowedGroup holds the edge schema definition for the user_allowed_groups relationship. +// It replaces the legacy users.allowed_groups BIGINT[] column. +type UserAllowedGroup struct { + ent.Schema +} + +func (UserAllowedGroup) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_allowed_groups"}, + // Composite primary key: (user_id, group_id). + field.ID("user_id", "group_id"), + } +} + +func (UserAllowedGroup) Fields() []ent.Field { + return []ent.Field{ + field.Int64("user_id"), + field.Int64("group_id"), + field.Time("created_at"). + Immutable(). + Default(time.Now), + } +} + +func (UserAllowedGroup) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("user", User.Type). + Unique(). + Required(). + Field("user_id"), + edge.To("group", Group.Type). + Unique(). + Required(). 
+ Field("group_id"), + } +} + +func (UserAllowedGroup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("group_id"), + } +} diff --git a/backend/ent/schema/user_subscription.go b/backend/ent/schema/user_subscription.go new file mode 100644 index 00000000..a87e4c39 --- /dev/null +++ b/backend/ent/schema/user_subscription.go @@ -0,0 +1,113 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserSubscription holds the schema definition for the UserSubscription entity. +type UserSubscription struct { + ent.Schema +} + +func (UserSubscription) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_subscriptions"}, + } +} + +func (UserSubscription) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + } +} + +func (UserSubscription) Fields() []ent.Field { + return []ent.Field{ + field.Int64("user_id"), + field.Int64("group_id"), + + field.Time("starts_at"). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("expires_at"). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.String("status"). + MaxLen(20). + Default(service.SubscriptionStatusActive), + + field.Time("daily_window_start"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("weekly_window_start"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("monthly_window_start"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + field.Float("daily_usage_usd"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}). 
+ Default(0), + field.Float("weekly_usage_usd"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}). + Default(0), + field.Float("monthly_usage_usd"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}). + Default(0), + + field.Int64("assigned_by"). + Optional(). + Nillable(), + field.Time("assigned_at"). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.String("notes"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + } +} + +func (UserSubscription) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type). + Ref("subscriptions"). + Field("user_id"). + Unique(). + Required(), + edge.From("group", Group.Type). + Ref("subscriptions"). + Field("group_id"). + Unique(). + Required(), + edge.From("assigned_by_user", User.Type). + Ref("assigned_subscriptions"). + Field("assigned_by"). + Unique(), + } +} + +func (UserSubscription) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("user_id"), + index.Fields("group_id"), + index.Fields("status"), + index.Fields("expires_at"), + index.Fields("assigned_by"), + index.Fields("user_id", "group_id").Unique(), + } +} + diff --git a/backend/ent/setting.go b/backend/ent/setting.go new file mode 100644 index 00000000..08ce81e4 --- /dev/null +++ b/backend/ent/setting.go @@ -0,0 +1,128 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// Setting is the model entity for the Setting schema. +type Setting struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. 
+ UpdatedAt time.Time `json:"updated_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Setting) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case setting.FieldID: + values[i] = new(sql.NullInt64) + case setting.FieldKey, setting.FieldValue: + values[i] = new(sql.NullString) + case setting.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Setting fields. +func (_m *Setting) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case setting.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case setting.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case setting.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.String + } + case setting.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the Setting. +// This includes values selected through modifiers, order, etc. 
+func (_m *Setting) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this Setting. +// Note that you need to call Setting.Unwrap() before calling this method if this Setting +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Setting) Update() *SettingUpdateOne { + return NewSettingClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Setting entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Setting) Unwrap() *Setting { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Setting is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Setting) String() string { + var builder strings.Builder + builder.WriteString("Setting(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(_m.Value) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Settings is a parsable slice of Setting. +type Settings []*Setting diff --git a/backend/ent/setting/setting.go b/backend/ent/setting/setting.go new file mode 100644 index 00000000..feb86b87 --- /dev/null +++ b/backend/ent/setting/setting.go @@ -0,0 +1,76 @@ +// Code generated by ent, DO NOT EDIT. + +package setting + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the setting type in the database. + Label = "setting" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldKey holds the string denoting the key field in the database. 
+ FieldKey = "key" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the setting in the database. + Table = "settings" +) + +// Columns holds all SQL columns for setting fields. +var Columns = []string{ + FieldID, + FieldKey, + FieldValue, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // ValueValidator is a validator for the "value" field. It is called by the builders before save. + ValueValidator func(string) error + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Setting queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/backend/ent/setting/where.go b/backend/ent/setting/where.go new file mode 100644 index 00000000..23343e9e --- /dev/null +++ b/backend/ent/setting/where.go @@ -0,0 +1,255 @@ +// Code generated by ent, DO NOT EDIT. + +package setting + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldID, id)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. 
+func Key(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldKey, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldValue, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// KeyEQ applies the EQ predicate on the "key" field. +func KeyEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. 
+func KeyContains(v string) predicate.Setting { + return predicate.Setting(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldContainsFold(FieldKey, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. 
+func ValueLT(v string) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Setting { + return predicate.Setting(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldContainsFold(FieldValue, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
+func UpdatedAtNotIn(vs ...time.Time) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Setting) predicate.Setting { + return predicate.Setting(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Setting) predicate.Setting { + return predicate.Setting(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Setting) predicate.Setting { + return predicate.Setting(sql.NotPredicates(p)) +} diff --git a/backend/ent/setting_create.go b/backend/ent/setting_create.go new file mode 100644 index 00000000..66c1231e --- /dev/null +++ b/backend/ent/setting_create.go @@ -0,0 +1,589 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingCreate is the builder for creating a Setting entity. 
+type SettingCreate struct { + config + mutation *SettingMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetKey sets the "key" field. +func (_c *SettingCreate) SetKey(v string) *SettingCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetValue sets the "value" field. +func (_c *SettingCreate) SetValue(v string) *SettingCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *SettingCreate) SetUpdatedAt(v time.Time) *SettingCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *SettingCreate) SetNillableUpdatedAt(v *time.Time) *SettingCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// Mutation returns the SettingMutation object of the builder. +func (_c *SettingCreate) Mutation() *SettingMutation { + return _c.mutation +} + +// Save creates the Setting in the database. +func (_c *SettingCreate) Save(ctx context.Context) (*Setting, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *SettingCreate) SaveX(ctx context.Context) *Setting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *SettingCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *SettingCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *SettingCreate) defaults() { + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := setting.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *SettingCreate) check() error { + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Setting.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "Setting.value"`)} + } + if v, ok := _c.mutation.Value(); ok { + if err := setting.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Setting.value": %w`, err)} + } + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Setting.updated_at"`)} + } + return nil +} + +func (_c *SettingCreate) sqlSave(ctx context.Context) (*Setting, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *SettingCreate) createSpec() (*Setting, *sqlgraph.CreateSpec) { + var ( + _node = &Setting{config: _c.config} + _spec = sqlgraph.NewCreateSpec(setting.Table, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, field.TypeString, value) + _node.Key = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + _node.Value = value + } + if value, ok := _c.mutation.UpdatedAt(); 
ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Setting.Create(). +// SetKey(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.SettingUpsert) { +// SetKey(v+v). +// }). +// Exec(ctx) +func (_c *SettingCreate) OnConflict(opts ...sql.ConflictOption) *SettingUpsertOne { + _c.conflict = opts + return &SettingUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *SettingCreate) OnConflictColumns(columns ...string) *SettingUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &SettingUpsertOne{ + create: _c, + } +} + +type ( + // SettingUpsertOne is the builder for "upsert"-ing + // one Setting node. + SettingUpsertOne struct { + create *SettingCreate + } + + // SettingUpsert is the "OnConflict" setter. + SettingUpsert struct { + *sql.UpdateSet + } +) + +// SetKey sets the "key" field. +func (u *SettingUpsert) SetKey(v string) *SettingUpsert { + u.Set(setting.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsert) UpdateKey() *SettingUpsert { + u.SetExcluded(setting.FieldKey) + return u +} + +// SetValue sets the "value" field. +func (u *SettingUpsert) SetValue(v string) *SettingUpsert { + u.Set(setting.FieldValue, v) + return u +} + +// UpdateValue sets the "value" field to the value that was provided on create. 
+func (u *SettingUpsert) UpdateValue() *SettingUpsert { + u.SetExcluded(setting.FieldValue) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsert) SetUpdatedAt(v time.Time) *SettingUpsert { + u.Set(setting.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsert) UpdateUpdatedAt() *SettingUpsert { + u.SetExcluded(setting.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *SettingUpsertOne) UpdateNewValues() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *SettingUpsertOne) Ignore() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *SettingUpsertOne) DoNothing() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the SettingCreate.OnConflict +// documentation for more info. +func (u *SettingUpsertOne) Update(set func(*SettingUpsert)) *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&SettingUpsert{UpdateSet: update}) + })) + return u +} + +// SetKey sets the "key" field. 
+func (u *SettingUpsertOne) SetKey(v string) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateKey() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateKey() + }) +} + +// SetValue sets the "value" field. +func (u *SettingUpsertOne) SetValue(v string) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateValue() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateValue() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsertOne) SetUpdatedAt(v time.Time) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateUpdatedAt() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *SettingUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for SettingCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *SettingUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *SettingUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *SettingUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// SettingCreateBulk is the builder for creating many Setting entities in bulk. +type SettingCreateBulk struct { + config + err error + builders []*SettingCreate + conflict []sql.ConflictOption +} + +// Save creates the Setting entities in the database. +func (_c *SettingCreateBulk) Save(ctx context.Context) ([]*Setting, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Setting, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*SettingMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *SettingCreateBulk) SaveX(ctx context.Context) []*Setting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *SettingCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *SettingCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Setting.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.SettingUpsert) { +// SetKey(v+v). +// }). +// Exec(ctx) +func (_c *SettingCreateBulk) OnConflict(opts ...sql.ConflictOption) *SettingUpsertBulk { + _c.conflict = opts + return &SettingUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Setting.Create(). 
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *SettingCreateBulk) OnConflictColumns(columns ...string) *SettingUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &SettingUpsertBulk{ + create: _c, + } +} + +// SettingUpsertBulk is the builder for "upsert"-ing +// a bulk of Setting nodes. +type SettingUpsertBulk struct { + create *SettingCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *SettingUpsertBulk) UpdateNewValues() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *SettingUpsertBulk) Ignore() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *SettingUpsertBulk) DoNothing() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the SettingCreateBulk.OnConflict +// documentation for more info. +func (u *SettingUpsertBulk) Update(set func(*SettingUpsert)) *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&SettingUpsert{UpdateSet: update}) + })) + return u +} + +// SetKey sets the "key" field. 
+func (u *SettingUpsertBulk) SetKey(v string) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateKey() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateKey() + }) +} + +// SetValue sets the "value" field. +func (u *SettingUpsertBulk) SetValue(v string) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateValue() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateValue() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsertBulk) SetUpdatedAt(v time.Time) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateUpdatedAt() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *SettingUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the SettingCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for SettingCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *SettingUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/setting_delete.go b/backend/ent/setting_delete.go new file mode 100644 index 00000000..64919673 --- /dev/null +++ b/backend/ent/setting_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingDelete is the builder for deleting a Setting entity. +type SettingDelete struct { + config + hooks []Hook + mutation *SettingMutation +} + +// Where appends a list predicates to the SettingDelete builder. +func (_d *SettingDelete) Where(ps ...predicate.Setting) *SettingDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *SettingDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *SettingDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *SettingDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(setting.Table, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// SettingDeleteOne is the builder for deleting a single Setting entity. 
+type SettingDeleteOne struct { + _d *SettingDelete +} + +// Where appends a list predicates to the SettingDelete builder. +func (_d *SettingDeleteOne) Where(ps ...predicate.Setting) *SettingDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *SettingDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{setting.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *SettingDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/setting_query.go b/backend/ent/setting_query.go new file mode 100644 index 00000000..e9dda6f5 --- /dev/null +++ b/backend/ent/setting_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingQuery is the builder for querying Setting entities. +type SettingQuery struct { + config + ctx *QueryContext + order []setting.OrderOption + inters []Interceptor + predicates []predicate.Setting + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the SettingQuery builder. +func (_q *SettingQuery) Where(ps ...predicate.Setting) *SettingQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *SettingQuery) Limit(limit int) *SettingQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. 
+func (_q *SettingQuery) Offset(offset int) *SettingQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *SettingQuery) Unique(unique bool) *SettingQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *SettingQuery) Order(o ...setting.OrderOption) *SettingQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first Setting entity from the query. +// Returns a *NotFoundError when no Setting was found. +func (_q *SettingQuery) First(ctx context.Context) (*Setting, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{setting.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *SettingQuery) FirstX(ctx context.Context) *Setting { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Setting ID from the query. +// Returns a *NotFoundError when no Setting ID was found. +func (_q *SettingQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{setting.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *SettingQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Setting entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Setting entity is found. 
+// Returns a *NotFoundError when no Setting entities are found. +func (_q *SettingQuery) Only(ctx context.Context) (*Setting, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{setting.Label} + default: + return nil, &NotSingularError{setting.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *SettingQuery) OnlyX(ctx context.Context) *Setting { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Setting ID in the query. +// Returns a *NotSingularError when more than one Setting ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *SettingQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{setting.Label} + default: + err = &NotSingularError{setting.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *SettingQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Settings. +func (_q *SettingQuery) All(ctx context.Context) ([]*Setting, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Setting, *SettingQuery]() + return withInterceptors[[]*Setting](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (_q *SettingQuery) AllX(ctx context.Context) []*Setting { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Setting IDs. +func (_q *SettingQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(setting.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *SettingQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *SettingQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*SettingQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *SettingQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *SettingQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *SettingQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the SettingQuery builder, including all associated steps. 
It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *SettingQuery) Clone() *SettingQuery { + if _q == nil { + return nil + } + return &SettingQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]setting.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Setting{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Key string `json:"key,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Setting.Query(). +// GroupBy(setting.FieldKey). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *SettingQuery) GroupBy(field string, fields ...string) *SettingGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &SettingGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = setting.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Key string `json:"key,omitempty"` +// } +// +// client.Setting.Query(). +// Select(setting.FieldKey). +// Scan(ctx, &v) +func (_q *SettingQuery) Select(fields ...string) *SettingSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &SettingSelect{SettingQuery: _q} + sbuild.label = setting.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a SettingSelect configured with the given aggregations. +func (_q *SettingQuery) Aggregate(fns ...AggregateFunc) *SettingSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *SettingQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !setting.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Setting, error) { + var ( + nodes = []*Setting{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Setting).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Setting{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *SettingQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *SettingQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + 
_spec.Node.Columns = append(_spec.Node.Columns, setting.FieldID) + for i := range fields { + if fields[i] != setting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(setting.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = setting.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// SettingGroupBy is the group-by builder for Setting entities. +type SettingGroupBy struct { + selector + build *SettingQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *SettingGroupBy) Aggregate(fns ...AggregateFunc) *SettingGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *SettingGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SettingQuery, *SettingGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *SettingGroupBy) sqlScan(ctx context.Context, root *SettingQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// SettingSelect is the builder for selecting fields of Setting entities. +type SettingSelect struct { + *SettingQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *SettingSelect) Aggregate(fns ...AggregateFunc) *SettingSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *SettingSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SettingQuery, *SettingSelect](ctx, _s.SettingQuery, _s, _s.inters, v) +} + +func (_s *SettingSelect) sqlScan(ctx context.Context, root *SettingQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/setting_update.go b/backend/ent/setting_update.go new file mode 100644 index 00000000..007fa36e --- /dev/null +++ b/backend/ent/setting_update.go @@ -0,0 +1,316 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingUpdate is the builder for updating Setting entities. +type SettingUpdate struct { + config + hooks []Hook + mutation *SettingMutation +} + +// Where appends a list predicates to the SettingUpdate builder. +func (_u *SettingUpdate) Where(ps ...predicate.Setting) *SettingUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetKey sets the "key" field. +func (_u *SettingUpdate) SetKey(v string) *SettingUpdate { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. 
+func (_u *SettingUpdate) SetNillableKey(v *string) *SettingUpdate { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *SettingUpdate) SetValue(v string) *SettingUpdate { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *SettingUpdate) SetNillableValue(v *string) *SettingUpdate { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *SettingUpdate) SetUpdatedAt(v time.Time) *SettingUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the SettingMutation object of the builder. +func (_u *SettingUpdate) Mutation() *SettingMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *SettingUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *SettingUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *SettingUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *SettingUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *SettingUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := setting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *SettingUpdate) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + if v, ok := _u.mutation.Value(); ok { + if err := setting.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Setting.value": %w`, err)} + } + } + return nil +} + +func (_u *SettingUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{setting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// SettingUpdateOne is the builder for updating a single Setting entity. +type SettingUpdateOne struct { + config + fields []string + hooks []Hook + mutation *SettingMutation +} + +// SetKey sets the "key" field. +func (_u *SettingUpdateOne) SetKey(v string) *SettingUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. 
+func (_u *SettingUpdateOne) SetNillableKey(v *string) *SettingUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *SettingUpdateOne) SetValue(v string) *SettingUpdateOne { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *SettingUpdateOne) SetNillableValue(v *string) *SettingUpdateOne { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *SettingUpdateOne) SetUpdatedAt(v time.Time) *SettingUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the SettingMutation object of the builder. +func (_u *SettingUpdateOne) Mutation() *SettingMutation { + return _u.mutation +} + +// Where appends a list predicates to the SettingUpdate builder. +func (_u *SettingUpdateOne) Where(ps ...predicate.Setting) *SettingUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *SettingUpdateOne) Select(field string, fields ...string) *SettingUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Setting entity. +func (_u *SettingUpdateOne) Save(ctx context.Context) (*Setting, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *SettingUpdateOne) SaveX(ctx context.Context) *Setting { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *SettingUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *SettingUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *SettingUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := setting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *SettingUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + if v, ok := _u.mutation.Value(); ok { + if err := setting.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Setting.value": %w`, err)} + } + } + return nil +} + +func (_u *SettingUpdateOne) sqlSave(ctx context.Context) (_node *Setting, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Setting.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, setting.FieldID) + for _, f := range fields { + if !setting.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != setting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, 
field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + } + _node = &Setting{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{setting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go new file mode 100644 index 00000000..cf5b510d --- /dev/null +++ b/backend/ent/tx.go @@ -0,0 +1,237 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Account is the client for interacting with the Account builders. + Account *AccountClient + // AccountGroup is the client for interacting with the AccountGroup builders. + AccountGroup *AccountGroupClient + // ApiKey is the client for interacting with the ApiKey builders. + ApiKey *ApiKeyClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // Proxy is the client for interacting with the Proxy builders. + Proxy *ProxyClient + // RedeemCode is the client for interacting with the RedeemCode builders. + RedeemCode *RedeemCodeClient + // Setting is the client for interacting with the Setting builders. + Setting *SettingClient + // User is the client for interacting with the User builders. + User *UserClient + // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. 
+ UserAllowedGroup *UserAllowedGroupClient + // UserSubscription is the client for interacting with the UserSubscription builders. + UserSubscription *UserSubscriptionClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. 
+func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. 
+func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Account = NewAccountClient(tx.config) + tx.AccountGroup = NewAccountGroupClient(tx.config) + tx.ApiKey = NewApiKeyClient(tx.config) + tx.Group = NewGroupClient(tx.config) + tx.Proxy = NewProxyClient(tx.config) + tx.RedeemCode = NewRedeemCodeClient(tx.config) + tx.Setting = NewSettingClient(tx.config) + tx.User = NewUserClient(tx.config) + tx.UserAllowedGroup = NewUserAllowedGroupClient(tx.config) + tx.UserSubscription = NewUserSubscriptionClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Account.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. 
+func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/backend/ent/user.go b/backend/ent/user.go new file mode 100644 index 00000000..1f06eb4e --- /dev/null +++ b/backend/ent/user.go @@ -0,0 +1,338 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Email holds the value of the "email" field. + Email string `json:"email,omitempty"` + // PasswordHash holds the value of the "password_hash" field. 
+ PasswordHash string `json:"password_hash,omitempty"` + // Role holds the value of the "role" field. + Role string `json:"role,omitempty"` + // Balance holds the value of the "balance" field. + Balance float64 `json:"balance,omitempty"` + // Concurrency holds the value of the "concurrency" field. + Concurrency int `json:"concurrency,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Username holds the value of the "username" field. + Username string `json:"username,omitempty"` + // Wechat holds the value of the "wechat" field. + Wechat string `json:"wechat,omitempty"` + // Notes holds the value of the "notes" field. + Notes string `json:"notes,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserQuery when eager-loading is set. + Edges UserEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserEdges holds the relations/edges for other nodes in the graph. +type UserEdges struct { + // APIKeys holds the value of the api_keys edge. + APIKeys []*ApiKey `json:"api_keys,omitempty"` + // RedeemCodes holds the value of the redeem_codes edge. + RedeemCodes []*RedeemCode `json:"redeem_codes,omitempty"` + // Subscriptions holds the value of the subscriptions edge. + Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` + // AssignedSubscriptions holds the value of the assigned_subscriptions edge. + AssignedSubscriptions []*UserSubscription `json:"assigned_subscriptions,omitempty"` + // AllowedGroups holds the value of the allowed_groups edge. + AllowedGroups []*Group `json:"allowed_groups,omitempty"` + // UserAllowedGroups holds the value of the user_allowed_groups edge. + UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. 
+ loadedTypes [6]bool +} + +// APIKeysOrErr returns the APIKeys value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) APIKeysOrErr() ([]*ApiKey, error) { + if e.loadedTypes[0] { + return e.APIKeys, nil + } + return nil, &NotLoadedError{edge: "api_keys"} +} + +// RedeemCodesOrErr returns the RedeemCodes value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) RedeemCodesOrErr() ([]*RedeemCode, error) { + if e.loadedTypes[1] { + return e.RedeemCodes, nil + } + return nil, &NotLoadedError{edge: "redeem_codes"} +} + +// SubscriptionsOrErr returns the Subscriptions value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) SubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[2] { + return e.Subscriptions, nil + } + return nil, &NotLoadedError{edge: "subscriptions"} +} + +// AssignedSubscriptionsOrErr returns the AssignedSubscriptions value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AssignedSubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[3] { + return e.AssignedSubscriptions, nil + } + return nil, &NotLoadedError{edge: "assigned_subscriptions"} +} + +// AllowedGroupsOrErr returns the AllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { + if e.loadedTypes[4] { + return e.AllowedGroups, nil + } + return nil, &NotLoadedError{edge: "allowed_groups"} +} + +// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { + if e.loadedTypes[5] { + return e.UserAllowedGroups, nil + } + return nil, &NotLoadedError{edge: "user_allowed_groups"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case user.FieldBalance: + values[i] = new(sql.NullFloat64) + case user.FieldID, user.FieldConcurrency: + values[i] = new(sql.NullInt64) + case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldWechat, user.FieldNotes: + values[i] = new(sql.NullString) + case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the User fields. +func (_m *User) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case user.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case user.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case user.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case user.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case user.FieldEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field email", values[i]) + } else if value.Valid { + 
_m.Email = value.String + } + case user.FieldPasswordHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password_hash", values[i]) + } else if value.Valid { + _m.PasswordHash = value.String + } + case user.FieldRole: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field role", values[i]) + } else if value.Valid { + _m.Role = value.String + } + case user.FieldBalance: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field balance", values[i]) + } else if value.Valid { + _m.Balance = value.Float64 + } + case user.FieldConcurrency: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field concurrency", values[i]) + } else if value.Valid { + _m.Concurrency = int(value.Int64) + } + case user.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case user.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + _m.Username = value.String + } + case user.FieldWechat: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field wechat", values[i]) + } else if value.Valid { + _m.Wechat = value.String + } + case user.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. 
+func (_m *User) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAPIKeys queries the "api_keys" edge of the User entity. +func (_m *User) QueryAPIKeys() *ApiKeyQuery { + return NewUserClient(_m.config).QueryAPIKeys(_m) +} + +// QueryRedeemCodes queries the "redeem_codes" edge of the User entity. +func (_m *User) QueryRedeemCodes() *RedeemCodeQuery { + return NewUserClient(_m.config).QueryRedeemCodes(_m) +} + +// QuerySubscriptions queries the "subscriptions" edge of the User entity. +func (_m *User) QuerySubscriptions() *UserSubscriptionQuery { + return NewUserClient(_m.config).QuerySubscriptions(_m) +} + +// QueryAssignedSubscriptions queries the "assigned_subscriptions" edge of the User entity. +func (_m *User) QueryAssignedSubscriptions() *UserSubscriptionQuery { + return NewUserClient(_m.config).QueryAssignedSubscriptions(_m) +} + +// QueryAllowedGroups queries the "allowed_groups" edge of the User entity. +func (_m *User) QueryAllowedGroups() *GroupQuery { + return NewUserClient(_m.config).QueryAllowedGroups(_m) +} + +// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the User entity. +func (_m *User) QueryUserAllowedGroups() *UserAllowedGroupQuery { + return NewUserClient(_m.config).QueryUserAllowedGroups(_m) +} + +// Update returns a builder for updating this User. +// Note that you need to call User.Unwrap() before calling this method if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *User) Update() *UserUpdateOne { + return NewUserClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the User entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (_m *User) Unwrap() *User { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("email=") + builder.WriteString(_m.Email) + builder.WriteString(", ") + builder.WriteString("password_hash=") + builder.WriteString(_m.PasswordHash) + builder.WriteString(", ") + builder.WriteString("role=") + builder.WriteString(_m.Role) + builder.WriteString(", ") + builder.WriteString("balance=") + builder.WriteString(fmt.Sprintf("%v", _m.Balance)) + builder.WriteString(", ") + builder.WriteString("concurrency=") + builder.WriteString(fmt.Sprintf("%v", _m.Concurrency)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("username=") + builder.WriteString(_m.Username) + builder.WriteString(", ") + builder.WriteString("wechat=") + builder.WriteString(_m.Wechat) + builder.WriteString(", ") + builder.WriteString("notes=") + builder.WriteString(_m.Notes) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. +type Users []*User diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go new file mode 100644 index 00000000..e1e6988b --- /dev/null +++ b/backend/ent/user/user.go @@ -0,0 +1,365 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package user + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldEmail holds the string denoting the email field in the database. + FieldEmail = "email" + // FieldPasswordHash holds the string denoting the password_hash field in the database. + FieldPasswordHash = "password_hash" + // FieldRole holds the string denoting the role field in the database. + FieldRole = "role" + // FieldBalance holds the string denoting the balance field in the database. + FieldBalance = "balance" + // FieldConcurrency holds the string denoting the concurrency field in the database. + FieldConcurrency = "concurrency" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldUsername holds the string denoting the username field in the database. + FieldUsername = "username" + // FieldWechat holds the string denoting the wechat field in the database. + FieldWechat = "wechat" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. + EdgeAPIKeys = "api_keys" + // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. + EdgeRedeemCodes = "redeem_codes" + // EdgeSubscriptions holds the string denoting the subscriptions edge name in mutations. 
+ EdgeSubscriptions = "subscriptions" + // EdgeAssignedSubscriptions holds the string denoting the assigned_subscriptions edge name in mutations. + EdgeAssignedSubscriptions = "assigned_subscriptions" + // EdgeAllowedGroups holds the string denoting the allowed_groups edge name in mutations. + EdgeAllowedGroups = "allowed_groups" + // EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations. + EdgeUserAllowedGroups = "user_allowed_groups" + // Table holds the table name of the user in the database. + Table = "users" + // APIKeysTable is the table that holds the api_keys relation/edge. + APIKeysTable = "api_keys" + // APIKeysInverseTable is the table name for the ApiKey entity. + // It exists in this package in order to avoid circular dependency with the "apikey" package. + APIKeysInverseTable = "api_keys" + // APIKeysColumn is the table column denoting the api_keys relation/edge. + APIKeysColumn = "user_id" + // RedeemCodesTable is the table that holds the redeem_codes relation/edge. + RedeemCodesTable = "redeem_codes" + // RedeemCodesInverseTable is the table name for the RedeemCode entity. + // It exists in this package in order to avoid circular dependency with the "redeemcode" package. + RedeemCodesInverseTable = "redeem_codes" + // RedeemCodesColumn is the table column denoting the redeem_codes relation/edge. + RedeemCodesColumn = "used_by" + // SubscriptionsTable is the table that holds the subscriptions relation/edge. + SubscriptionsTable = "user_subscriptions" + // SubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + SubscriptionsInverseTable = "user_subscriptions" + // SubscriptionsColumn is the table column denoting the subscriptions relation/edge. + SubscriptionsColumn = "user_id" + // AssignedSubscriptionsTable is the table that holds the assigned_subscriptions relation/edge. 
+ AssignedSubscriptionsTable = "user_subscriptions" + // AssignedSubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + AssignedSubscriptionsInverseTable = "user_subscriptions" + // AssignedSubscriptionsColumn is the table column denoting the assigned_subscriptions relation/edge. + AssignedSubscriptionsColumn = "assigned_by" + // AllowedGroupsTable is the table that holds the allowed_groups relation/edge. The primary key declared below. + AllowedGroupsTable = "user_allowed_groups" + // AllowedGroupsInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + AllowedGroupsInverseTable = "groups" + // UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge. + UserAllowedGroupsTable = "user_allowed_groups" + // UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity. + // It exists in this package in order to avoid circular dependency with the "userallowedgroup" package. + UserAllowedGroupsInverseTable = "user_allowed_groups" + // UserAllowedGroupsColumn is the table column denoting the user_allowed_groups relation/edge. + UserAllowedGroupsColumn = "user_id" +) + +// Columns holds all SQL columns for user fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldEmail, + FieldPasswordHash, + FieldRole, + FieldBalance, + FieldConcurrency, + FieldStatus, + FieldUsername, + FieldWechat, + FieldNotes, +} + +var ( + // AllowedGroupsPrimaryKey and AllowedGroupsColumn2 are the table columns denoting the + // primary key for the allowed_groups relation (M2M). + AllowedGroupsPrimaryKey = []string{"user_id", "group_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // EmailValidator is a validator for the "email" field. It is called by the builders before save. + EmailValidator func(string) error + // PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + PasswordHashValidator func(string) error + // DefaultRole holds the default value on creation for the "role" field. + DefaultRole string + // RoleValidator is a validator for the "role" field. It is called by the builders before save. + RoleValidator func(string) error + // DefaultBalance holds the default value on creation for the "balance" field. + DefaultBalance float64 + // DefaultConcurrency holds the default value on creation for the "concurrency" field. + DefaultConcurrency int + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultUsername holds the default value on creation for the "username" field. + DefaultUsername string + // UsernameValidator is a validator for the "username" field. 
It is called by the builders before save. + UsernameValidator func(string) error + // DefaultWechat holds the default value on creation for the "wechat" field. + DefaultWechat string + // WechatValidator is a validator for the "wechat" field. It is called by the builders before save. + WechatValidator func(string) error + // DefaultNotes holds the default value on creation for the "notes" field. + DefaultNotes string +) + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPasswordHash orders the results by the password_hash field. +func ByPasswordHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPasswordHash, opts...).ToFunc() +} + +// ByRole orders the results by the role field. +func ByRole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRole, opts...).ToFunc() +} + +// ByBalance orders the results by the balance field. 
+func ByBalance(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBalance, opts...).ToFunc() +} + +// ByConcurrency orders the results by the concurrency field. +func ByConcurrency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConcurrency, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByWechat orders the results by the wechat field. +func ByWechat(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWechat, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByAPIKeysCount orders the results by api_keys count. +func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAPIKeysStep(), opts...) + } +} + +// ByAPIKeys orders the results by api_keys terms. +func ByAPIKeys(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAPIKeysStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByRedeemCodesCount orders the results by redeem_codes count. +func ByRedeemCodesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRedeemCodesStep(), opts...) + } +} + +// ByRedeemCodes orders the results by redeem_codes terms. 
+func ByRedeemCodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRedeemCodesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// BySubscriptionsCount orders the results by subscriptions count. +func BySubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newSubscriptionsStep(), opts...) + } +} + +// BySubscriptions orders the results by subscriptions terms. +func BySubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAssignedSubscriptionsCount orders the results by assigned_subscriptions count. +func ByAssignedSubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAssignedSubscriptionsStep(), opts...) + } +} + +// ByAssignedSubscriptions orders the results by assigned_subscriptions terms. +func ByAssignedSubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAssignedSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAllowedGroupsCount orders the results by allowed_groups count. +func ByAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowedGroupsStep(), opts...) + } +} + +// ByAllowedGroups orders the results by allowed_groups terms. +func ByAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUserAllowedGroupsCount orders the results by user_allowed_groups count. 
+func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserAllowedGroupsStep(), opts...) + } +} + +// ByUserAllowedGroups orders the results by user_allowed_groups terms. +func ByUserAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAPIKeysStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(APIKeysInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) +} +func newRedeemCodesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RedeemCodesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) +} +func newSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) +} +func newAssignedSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AssignedSubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), + ) +} +func newAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowedGroupsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowedGroupsTable, AllowedGroupsPrimaryKey...), + ) +} +func newUserAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserAllowedGroupsInverseTable, UserAllowedGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) +} diff --git 
a/backend/ent/user/where.go b/backend/ent/user/where.go new file mode 100644 index 00000000..ad434c59 --- /dev/null +++ b/backend/ent/user/where.go @@ -0,0 +1,934 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.User { + return predicate.User(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.User { + return predicate.User(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.User { + return predicate.User(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.User { + return predicate.User(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.User { + return predicate.User(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.User { + return predicate.User(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.User { + return predicate.User(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. 
It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Email applies equality check predicate on the "email" field. It's identical to EmailEQ. +func Email(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// PasswordHash applies equality check predicate on the "password_hash" field. It's identical to PasswordHashEQ. +func PasswordHash(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// Role applies equality check predicate on the "role" field. It's identical to RoleEQ. +func Role(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldRole, v)) +} + +// Balance applies equality check predicate on the "balance" field. It's identical to BalanceEQ. +func Balance(v float64) predicate.User { + return predicate.User(sql.FieldEQ(FieldBalance, v)) +} + +// Concurrency applies equality check predicate on the "concurrency" field. It's identical to ConcurrencyEQ. +func Concurrency(v int) predicate.User { + return predicate.User(sql.FieldEQ(FieldConcurrency, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldStatus, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. +func Username(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// Wechat applies equality check predicate on the "wechat" field. It's identical to WechatEQ. 
+func Wechat(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldWechat, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. 
+func DeletedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldDeletedAt)) +} + +// EmailEQ applies the EQ predicate on the "email" field. +func EmailEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// EmailNEQ applies the NEQ predicate on the "email" field. +func EmailNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldEmail, v)) +} + +// EmailIn applies the In predicate on the "email" field. +func EmailIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldEmail, vs...)) +} + +// EmailNotIn applies the NotIn predicate on the "email" field. +func EmailNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldEmail, vs...)) +} + +// EmailGT applies the GT predicate on the "email" field. 
+func EmailGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldEmail, v)) +} + +// EmailGTE applies the GTE predicate on the "email" field. +func EmailGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldEmail, v)) +} + +// EmailLT applies the LT predicate on the "email" field. +func EmailLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldEmail, v)) +} + +// EmailLTE applies the LTE predicate on the "email" field. +func EmailLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldEmail, v)) +} + +// EmailContains applies the Contains predicate on the "email" field. +func EmailContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldEmail, v)) +} + +// EmailHasPrefix applies the HasPrefix predicate on the "email" field. +func EmailHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldEmail, v)) +} + +// EmailHasSuffix applies the HasSuffix predicate on the "email" field. +func EmailHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldEmail, v)) +} + +// EmailEqualFold applies the EqualFold predicate on the "email" field. +func EmailEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldEmail, v)) +} + +// EmailContainsFold applies the ContainsFold predicate on the "email" field. +func EmailContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldEmail, v)) +} + +// PasswordHashEQ applies the EQ predicate on the "password_hash" field. +func PasswordHashEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// PasswordHashNEQ applies the NEQ predicate on the "password_hash" field. +func PasswordHashNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldPasswordHash, v)) +} + +// PasswordHashIn applies the In predicate on the "password_hash" field. 
+func PasswordHashIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldPasswordHash, vs...)) +} + +// PasswordHashNotIn applies the NotIn predicate on the "password_hash" field. +func PasswordHashNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldPasswordHash, vs...)) +} + +// PasswordHashGT applies the GT predicate on the "password_hash" field. +func PasswordHashGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldPasswordHash, v)) +} + +// PasswordHashGTE applies the GTE predicate on the "password_hash" field. +func PasswordHashGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldPasswordHash, v)) +} + +// PasswordHashLT applies the LT predicate on the "password_hash" field. +func PasswordHashLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldPasswordHash, v)) +} + +// PasswordHashLTE applies the LTE predicate on the "password_hash" field. +func PasswordHashLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldPasswordHash, v)) +} + +// PasswordHashContains applies the Contains predicate on the "password_hash" field. +func PasswordHashContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldPasswordHash, v)) +} + +// PasswordHashHasPrefix applies the HasPrefix predicate on the "password_hash" field. +func PasswordHashHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldPasswordHash, v)) +} + +// PasswordHashHasSuffix applies the HasSuffix predicate on the "password_hash" field. +func PasswordHashHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldPasswordHash, v)) +} + +// PasswordHashEqualFold applies the EqualFold predicate on the "password_hash" field. 
+func PasswordHashEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldPasswordHash, v)) +} + +// PasswordHashContainsFold applies the ContainsFold predicate on the "password_hash" field. +func PasswordHashContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldPasswordHash, v)) +} + +// RoleEQ applies the EQ predicate on the "role" field. +func RoleEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldRole, v)) +} + +// RoleNEQ applies the NEQ predicate on the "role" field. +func RoleNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldRole, v)) +} + +// RoleIn applies the In predicate on the "role" field. +func RoleIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldRole, vs...)) +} + +// RoleNotIn applies the NotIn predicate on the "role" field. +func RoleNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldRole, vs...)) +} + +// RoleGT applies the GT predicate on the "role" field. +func RoleGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldRole, v)) +} + +// RoleGTE applies the GTE predicate on the "role" field. +func RoleGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldRole, v)) +} + +// RoleLT applies the LT predicate on the "role" field. +func RoleLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldRole, v)) +} + +// RoleLTE applies the LTE predicate on the "role" field. +func RoleLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldRole, v)) +} + +// RoleContains applies the Contains predicate on the "role" field. +func RoleContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldRole, v)) +} + +// RoleHasPrefix applies the HasPrefix predicate on the "role" field. 
+func RoleHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldRole, v)) +} + +// RoleHasSuffix applies the HasSuffix predicate on the "role" field. +func RoleHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldRole, v)) +} + +// RoleEqualFold applies the EqualFold predicate on the "role" field. +func RoleEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldRole, v)) +} + +// RoleContainsFold applies the ContainsFold predicate on the "role" field. +func RoleContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldRole, v)) +} + +// BalanceEQ applies the EQ predicate on the "balance" field. +func BalanceEQ(v float64) predicate.User { + return predicate.User(sql.FieldEQ(FieldBalance, v)) +} + +// BalanceNEQ applies the NEQ predicate on the "balance" field. +func BalanceNEQ(v float64) predicate.User { + return predicate.User(sql.FieldNEQ(FieldBalance, v)) +} + +// BalanceIn applies the In predicate on the "balance" field. +func BalanceIn(vs ...float64) predicate.User { + return predicate.User(sql.FieldIn(FieldBalance, vs...)) +} + +// BalanceNotIn applies the NotIn predicate on the "balance" field. +func BalanceNotIn(vs ...float64) predicate.User { + return predicate.User(sql.FieldNotIn(FieldBalance, vs...)) +} + +// BalanceGT applies the GT predicate on the "balance" field. +func BalanceGT(v float64) predicate.User { + return predicate.User(sql.FieldGT(FieldBalance, v)) +} + +// BalanceGTE applies the GTE predicate on the "balance" field. +func BalanceGTE(v float64) predicate.User { + return predicate.User(sql.FieldGTE(FieldBalance, v)) +} + +// BalanceLT applies the LT predicate on the "balance" field. +func BalanceLT(v float64) predicate.User { + return predicate.User(sql.FieldLT(FieldBalance, v)) +} + +// BalanceLTE applies the LTE predicate on the "balance" field. 
+func BalanceLTE(v float64) predicate.User { + return predicate.User(sql.FieldLTE(FieldBalance, v)) +} + +// ConcurrencyEQ applies the EQ predicate on the "concurrency" field. +func ConcurrencyEQ(v int) predicate.User { + return predicate.User(sql.FieldEQ(FieldConcurrency, v)) +} + +// ConcurrencyNEQ applies the NEQ predicate on the "concurrency" field. +func ConcurrencyNEQ(v int) predicate.User { + return predicate.User(sql.FieldNEQ(FieldConcurrency, v)) +} + +// ConcurrencyIn applies the In predicate on the "concurrency" field. +func ConcurrencyIn(vs ...int) predicate.User { + return predicate.User(sql.FieldIn(FieldConcurrency, vs...)) +} + +// ConcurrencyNotIn applies the NotIn predicate on the "concurrency" field. +func ConcurrencyNotIn(vs ...int) predicate.User { + return predicate.User(sql.FieldNotIn(FieldConcurrency, vs...)) +} + +// ConcurrencyGT applies the GT predicate on the "concurrency" field. +func ConcurrencyGT(v int) predicate.User { + return predicate.User(sql.FieldGT(FieldConcurrency, v)) +} + +// ConcurrencyGTE applies the GTE predicate on the "concurrency" field. +func ConcurrencyGTE(v int) predicate.User { + return predicate.User(sql.FieldGTE(FieldConcurrency, v)) +} + +// ConcurrencyLT applies the LT predicate on the "concurrency" field. +func ConcurrencyLT(v int) predicate.User { + return predicate.User(sql.FieldLT(FieldConcurrency, v)) +} + +// ConcurrencyLTE applies the LTE predicate on the "concurrency" field. +func ConcurrencyLTE(v int) predicate.User { + return predicate.User(sql.FieldLTE(FieldConcurrency, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. 
+func StatusIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldStatus, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. 
+func UsernameEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. 
+func UsernameEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldUsername, v)) +} + +// WechatEQ applies the EQ predicate on the "wechat" field. +func WechatEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldWechat, v)) +} + +// WechatNEQ applies the NEQ predicate on the "wechat" field. +func WechatNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldWechat, v)) +} + +// WechatIn applies the In predicate on the "wechat" field. +func WechatIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldWechat, vs...)) +} + +// WechatNotIn applies the NotIn predicate on the "wechat" field. +func WechatNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldWechat, vs...)) +} + +// WechatGT applies the GT predicate on the "wechat" field. +func WechatGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldWechat, v)) +} + +// WechatGTE applies the GTE predicate on the "wechat" field. +func WechatGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldWechat, v)) +} + +// WechatLT applies the LT predicate on the "wechat" field. +func WechatLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldWechat, v)) +} + +// WechatLTE applies the LTE predicate on the "wechat" field. +func WechatLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldWechat, v)) +} + +// WechatContains applies the Contains predicate on the "wechat" field. +func WechatContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldWechat, v)) +} + +// WechatHasPrefix applies the HasPrefix predicate on the "wechat" field. 
+func WechatHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldWechat, v)) +} + +// WechatHasSuffix applies the HasSuffix predicate on the "wechat" field. +func WechatHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldWechat, v)) +} + +// WechatEqualFold applies the EqualFold predicate on the "wechat" field. +func WechatEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldWechat, v)) +} + +// WechatContainsFold applies the ContainsFold predicate on the "wechat" field. +func WechatContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldWechat, v)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. 
+func NotesLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldNotes, v)) +} + +// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. +func HasAPIKeys() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAPIKeysWith applies the HasEdge predicate on the "api_keys" edge with a given conditions (other predicates). +func HasAPIKeysWith(preds ...predicate.ApiKey) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAPIKeysStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasRedeemCodes applies the HasEdge predicate on the "redeem_codes" edge. 
+func HasRedeemCodes() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRedeemCodesWith applies the HasEdge predicate on the "redeem_codes" edge with a given conditions (other predicates). +func HasRedeemCodesWith(preds ...predicate.RedeemCode) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newRedeemCodesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasSubscriptions applies the HasEdge predicate on the "subscriptions" edge. +func HasSubscriptions() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSubscriptionsWith applies the HasEdge predicate on the "subscriptions" edge with a given conditions (other predicates). +func HasSubscriptionsWith(preds ...predicate.UserSubscription) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAssignedSubscriptions applies the HasEdge predicate on the "assigned_subscriptions" edge. +func HasAssignedSubscriptions() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAssignedSubscriptionsWith applies the HasEdge predicate on the "assigned_subscriptions" edge with a given conditions (other predicates). 
+func HasAssignedSubscriptionsWith(preds ...predicate.UserSubscription) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAssignedSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAllowedGroups applies the HasEdge predicate on the "allowed_groups" edge. +func HasAllowedGroups() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowedGroupsTable, AllowedGroupsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowedGroupsWith applies the HasEdge predicate on the "allowed_groups" edge with a given conditions (other predicates). +func HasAllowedGroupsWith(preds ...predicate.Group) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAllowedGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge. +func HasUserAllowedGroups() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserAllowedGroupsWith applies the HasEdge predicate on the "user_allowed_groups" edge with a given conditions (other predicates). +func HasUserAllowedGroupsWith(preds ...predicate.UserAllowedGroup) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newUserAllowedGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.User) predicate.User { + return predicate.User(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.User) predicate.User { + return predicate.User(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.User) predicate.User { + return predicate.User(sql.NotPredicates(p)) +} diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go new file mode 100644 index 00000000..8c9caaa2 --- /dev/null +++ b/backend/ent/user_create.go @@ -0,0 +1,1391 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserCreate is the builder for creating a User entity. +type UserCreate struct { + config + mutation *UserMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserCreate) SetCreatedAt(v time.Time) *UserCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableCreatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserCreate) SetUpdatedAt(v time.Time) *UserCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
+func (_c *UserCreate) SetNillableUpdatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *UserCreate) SetDeletedAt(v time.Time) *UserCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableDeletedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetEmail sets the "email" field. +func (_c *UserCreate) SetEmail(v string) *UserCreate { + _c.mutation.SetEmail(v) + return _c +} + +// SetPasswordHash sets the "password_hash" field. +func (_c *UserCreate) SetPasswordHash(v string) *UserCreate { + _c.mutation.SetPasswordHash(v) + return _c +} + +// SetRole sets the "role" field. +func (_c *UserCreate) SetRole(v string) *UserCreate { + _c.mutation.SetRole(v) + return _c +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (_c *UserCreate) SetNillableRole(v *string) *UserCreate { + if v != nil { + _c.SetRole(*v) + } + return _c +} + +// SetBalance sets the "balance" field. +func (_c *UserCreate) SetBalance(v float64) *UserCreate { + _c.mutation.SetBalance(v) + return _c +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_c *UserCreate) SetNillableBalance(v *float64) *UserCreate { + if v != nil { + _c.SetBalance(*v) + } + return _c +} + +// SetConcurrency sets the "concurrency" field. +func (_c *UserCreate) SetConcurrency(v int) *UserCreate { + _c.mutation.SetConcurrency(v) + return _c +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_c *UserCreate) SetNillableConcurrency(v *int) *UserCreate { + if v != nil { + _c.SetConcurrency(*v) + } + return _c +} + +// SetStatus sets the "status" field. 
+func (_c *UserCreate) SetStatus(v string) *UserCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *UserCreate) SetNillableStatus(v *string) *UserCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetUsername sets the "username" field. +func (_c *UserCreate) SetUsername(v string) *UserCreate { + _c.mutation.SetUsername(v) + return _c +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_c *UserCreate) SetNillableUsername(v *string) *UserCreate { + if v != nil { + _c.SetUsername(*v) + } + return _c +} + +// SetWechat sets the "wechat" field. +func (_c *UserCreate) SetWechat(v string) *UserCreate { + _c.mutation.SetWechat(v) + return _c +} + +// SetNillableWechat sets the "wechat" field if the given value is not nil. +func (_c *UserCreate) SetNillableWechat(v *string) *UserCreate { + if v != nil { + _c.SetWechat(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *UserCreate) SetNotes(v string) *UserCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *UserCreate) SetNillableNotes(v *string) *UserCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs. +func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate { + _c.mutation.AddAPIKeyIDs(ids...) + return _c +} + +// AddAPIKeys adds the "api_keys" edges to the ApiKey entity. +func (_c *UserCreate) AddAPIKeys(v ...*ApiKey) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_c *UserCreate) AddRedeemCodeIDs(ids ...int64) *UserCreate { + _c.mutation.AddRedeemCodeIDs(ids...) 
+ return _c +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_c *UserCreate) AddRedeemCodes(v ...*RedeemCode) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_c *UserCreate) AddSubscriptionIDs(ids ...int64) *UserCreate { + _c.mutation.AddSubscriptionIDs(ids...) + return _c +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_c *UserCreate) AddSubscriptions(v ...*UserSubscription) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_c *UserCreate) AddAssignedSubscriptionIDs(ids ...int64) *UserCreate { + _c.mutation.AddAssignedSubscriptionIDs(ids...) + return _c +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. +func (_c *UserCreate) AddAssignedSubscriptions(v ...*UserSubscription) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_c *UserCreate) AddAllowedGroupIDs(ids ...int64) *UserCreate { + _c.mutation.AddAllowedGroupIDs(ids...) + return _c +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_c *UserCreate) AddAllowedGroups(v ...*Group) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAllowedGroupIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_c *UserCreate) Mutation() *UserMutation { + return _c.mutation +} + +// Save creates the User in the database. 
+func (_c *UserCreate) Save(ctx context.Context) (*User, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserCreate) SaveX(ctx context.Context) *User { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if user.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized user.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := user.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if user.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized user.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := user.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Role(); !ok { + v := user.DefaultRole + _c.mutation.SetRole(v) + } + if _, ok := _c.mutation.Balance(); !ok { + v := user.DefaultBalance + _c.mutation.SetBalance(v) + } + if _, ok := _c.mutation.Concurrency(); !ok { + v := user.DefaultConcurrency + _c.mutation.SetConcurrency(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := user.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.Username(); !ok { + v := user.DefaultUsername + _c.mutation.SetUsername(v) + } + if _, ok := _c.mutation.Wechat(); !ok { + v := user.DefaultWechat + _c.mutation.SetWechat(v) + } + if _, ok := _c.mutation.Notes(); !ok { + v := user.DefaultNotes + _c.mutation.SetNotes(v) + } + return 
nil +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UserCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} + } + if _, ok := _c.mutation.Email(); !ok { + return &ValidationError{Name: "email", err: errors.New(`ent: missing required field "User.email"`)} + } + if v, ok := _c.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if _, ok := _c.mutation.PasswordHash(); !ok { + return &ValidationError{Name: "password_hash", err: errors.New(`ent: missing required field "User.password_hash"`)} + } + if v, ok := _c.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if _, ok := _c.mutation.Role(); !ok { + return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "User.role"`)} + } + if v, ok := _c.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if _, ok := _c.mutation.Balance(); !ok { + return &ValidationError{Name: "balance", err: errors.New(`ent: missing required field "User.balance"`)} + } + if _, ok := _c.mutation.Concurrency(); !ok { + return &ValidationError{Name: "concurrency", err: errors.New(`ent: missing required field "User.concurrency"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing 
required field "User.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if _, ok := _c.mutation.Username(); !ok { + return &ValidationError{Name: "username", err: errors.New(`ent: missing required field "User.username"`)} + } + if v, ok := _c.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if _, ok := _c.mutation.Wechat(); !ok { + return &ValidationError{Name: "wechat", err: errors.New(`ent: missing required field "User.wechat"`)} + } + if v, ok := _c.mutation.Wechat(); ok { + if err := user.WechatValidator(v); err != nil { + return &ValidationError{Name: "wechat", err: fmt.Errorf(`ent: validator failed for field "User.wechat": %w`, err)} + } + } + if _, ok := _c.mutation.Notes(); !ok { + return &ValidationError{Name: "notes", err: errors.New(`ent: missing required field "User.notes"`)} + } + return nil +} + +func (_c *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { + var ( + _node = &User{config: _c.config} + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, 
value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + _node.Email = value + } + if value, ok := _c.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + _node.PasswordHash = value + } + if value, ok := _c.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + _node.Role = value + } + if value, ok := _c.mutation.Balance(); ok { + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + _node.Balance = value + } + if value, ok := _c.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + _node.Concurrency = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + _node.Username = value + } + if value, ok := _c.mutation.Wechat(); ok { + _spec.SetField(user.FieldWechat, field.TypeString, value) + _node.Wechat = value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(user.FieldNotes, field.TypeString, value) + _node.Notes = value + } + if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := 
_c.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AllowedGroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _c.config, mutation: newUserAllowedGroupMutation(_c.config, OpCreate)} + createE.defaults() + _, specE := 
createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { + _c.conflict = opts + return &UserUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertOne{ + create: _c, + } +} + +type ( + // UserUpsertOne is the builder for "upsert"-ing + // one User node. + UserUpsertOne struct { + create *UserCreate + } + + // UserUpsert is the "OnConflict" setter. + UserUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsert) SetUpdatedAt(v time.Time) *UserUpsert { + u.Set(user.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsert) UpdateUpdatedAt() *UserUpsert { + u.SetExcluded(user.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserUpsert) SetDeletedAt(v time.Time) *UserUpsert { + u.Set(user.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. 
+func (u *UserUpsert) UpdateDeletedAt() *UserUpsert { + u.SetExcluded(user.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsert) ClearDeletedAt() *UserUpsert { + u.SetNull(user.FieldDeletedAt) + return u +} + +// SetEmail sets the "email" field. +func (u *UserUpsert) SetEmail(v string) *UserUpsert { + u.Set(user.FieldEmail, v) + return u +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsert) UpdateEmail() *UserUpsert { + u.SetExcluded(user.FieldEmail) + return u +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsert) SetPasswordHash(v string) *UserUpsert { + u.Set(user.FieldPasswordHash, v) + return u +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsert) UpdatePasswordHash() *UserUpsert { + u.SetExcluded(user.FieldPasswordHash) + return u +} + +// SetRole sets the "role" field. +func (u *UserUpsert) SetRole(v string) *UserUpsert { + u.Set(user.FieldRole, v) + return u +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsert) UpdateRole() *UserUpsert { + u.SetExcluded(user.FieldRole) + return u +} + +// SetBalance sets the "balance" field. +func (u *UserUpsert) SetBalance(v float64) *UserUpsert { + u.Set(user.FieldBalance, v) + return u +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsert) UpdateBalance() *UserUpsert { + u.SetExcluded(user.FieldBalance) + return u +} + +// AddBalance adds v to the "balance" field. +func (u *UserUpsert) AddBalance(v float64) *UserUpsert { + u.Add(user.FieldBalance, v) + return u +} + +// SetConcurrency sets the "concurrency" field. 
+func (u *UserUpsert) SetConcurrency(v int) *UserUpsert { + u.Set(user.FieldConcurrency, v) + return u +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *UserUpsert) UpdateConcurrency() *UserUpsert { + u.SetExcluded(user.FieldConcurrency) + return u +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsert) AddConcurrency(v int) *UserUpsert { + u.Add(user.FieldConcurrency, v) + return u +} + +// SetStatus sets the "status" field. +func (u *UserUpsert) SetStatus(v string) *UserUpsert { + u.Set(user.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsert) UpdateStatus() *UserUpsert { + u.SetExcluded(user.FieldStatus) + return u +} + +// SetUsername sets the "username" field. +func (u *UserUpsert) SetUsername(v string) *UserUpsert { + u.Set(user.FieldUsername, v) + return u +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsert) UpdateUsername() *UserUpsert { + u.SetExcluded(user.FieldUsername) + return u +} + +// SetWechat sets the "wechat" field. +func (u *UserUpsert) SetWechat(v string) *UserUpsert { + u.Set(user.FieldWechat, v) + return u +} + +// UpdateWechat sets the "wechat" field to the value that was provided on create. +func (u *UserUpsert) UpdateWechat() *UserUpsert { + u.SetExcluded(user.FieldWechat) + return u +} + +// SetNotes sets the "notes" field. +func (u *UserUpsert) SetNotes(v string) *UserUpsert { + u.Set(user.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserUpsert) UpdateNotes() *UserUpsert { + u.SetExcluded(user.FieldNotes) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.User.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserUpsertOne) UpdateNewValues() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertOne) Ignore() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertOne) DoNothing() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreate.OnConflict +// documentation for more info. +func (u *UserUpsertOne) Update(set func(*UserUpsert)) *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertOne) SetUpdatedAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUpdatedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (u *UserUpsertOne) SetDeletedAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateDeletedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsertOne) ClearDeletedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearDeletedAt() + }) +} + +// SetEmail sets the "email" field. +func (u *UserUpsertOne) SetEmail(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetEmail(v) + }) +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateEmail() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateEmail() + }) +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsertOne) SetPasswordHash(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePasswordHash() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// SetRole sets the "role" field. +func (u *UserUpsertOne) SetRole(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetRole(v) + }) +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateRole() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateRole() + }) +} + +// SetBalance sets the "balance" field. +func (u *UserUpsertOne) SetBalance(v float64) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetBalance(v) + }) +} + +// AddBalance adds v to the "balance" field. 
+func (u *UserUpsertOne) AddBalance(v float64) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.AddBalance(v) + }) +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateBalance() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateBalance() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *UserUpsertOne) SetConcurrency(v int) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsertOne) AddConcurrency(v int) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateConcurrency() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateConcurrency() + }) +} + +// SetStatus sets the "status" field. +func (u *UserUpsertOne) SetStatus(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateStatus() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateStatus() + }) +} + +// SetUsername sets the "username" field. +func (u *UserUpsertOne) SetUsername(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUsername() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetWechat sets the "wechat" field. +func (u *UserUpsertOne) SetWechat(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetWechat(v) + }) +} + +// UpdateWechat sets the "wechat" field to the value that was provided on create. 
+func (u *UserUpsertOne) UpdateWechat() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateWechat() + }) +} + +// SetNotes sets the "notes" field. +func (u *UserUpsertOne) SetNotes(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateNotes() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateNotes() + }) +} + +// Exec executes the query. +func (u *UserUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserCreateBulk is the builder for creating many User entities in bulk. +type UserCreateBulk struct { + config + err error + builders []*UserCreate + conflict []sql.ConflictOption +} + +// Save creates the User entities in the database. 
+func (_c *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*User, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *UserCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { + _c.conflict = opts + return &UserUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertBulk{ + create: _c, + } +} + +// UserUpsertBulk is the builder for "upsert"-ing +// a bulk of User nodes. +type UserUpsertBulk struct { + create *UserCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *UserUpsertBulk) UpdateNewValues() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertBulk) Ignore() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertBulk) DoNothing() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreateBulk.OnConflict +// documentation for more info. +func (u *UserUpsertBulk) Update(set func(*UserUpsert)) *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertBulk) SetUpdatedAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUpdatedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (u *UserUpsertBulk) SetDeletedAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateDeletedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsertBulk) ClearDeletedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearDeletedAt() + }) +} + +// SetEmail sets the "email" field. +func (u *UserUpsertBulk) SetEmail(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetEmail(v) + }) +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateEmail() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateEmail() + }) +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsertBulk) SetPasswordHash(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdatePasswordHash() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// SetRole sets the "role" field. +func (u *UserUpsertBulk) SetRole(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetRole(v) + }) +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateRole() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateRole() + }) +} + +// SetBalance sets the "balance" field. +func (u *UserUpsertBulk) SetBalance(v float64) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetBalance(v) + }) +} + +// AddBalance adds v to the "balance" field. 
+func (u *UserUpsertBulk) AddBalance(v float64) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.AddBalance(v) + }) +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateBalance() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateBalance() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *UserUpsertBulk) SetConcurrency(v int) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsertBulk) AddConcurrency(v int) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateConcurrency() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateConcurrency() + }) +} + +// SetStatus sets the "status" field. +func (u *UserUpsertBulk) SetStatus(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateStatus() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateStatus() + }) +} + +// SetUsername sets the "username" field. +func (u *UserUpsertBulk) SetUsername(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUsername() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetWechat sets the "wechat" field. +func (u *UserUpsertBulk) SetWechat(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetWechat(v) + }) +} + +// UpdateWechat sets the "wechat" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdateWechat() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateWechat() + }) +} + +// SetNotes sets the "notes" field. +func (u *UserUpsertBulk) SetNotes(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateNotes() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateNotes() + }) +} + +// Exec executes the query. +func (u *UserUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/user_delete.go b/backend/ent/user_delete.go new file mode 100644 index 00000000..002ef1cf --- /dev/null +++ b/backend/ent/user_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// UserDelete is the builder for deleting a User entity. +type UserDelete struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserDelete builder. +func (_d *UserDelete) Where(ps ...predicate.User) *UserDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (_d *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + _d *UserDelete +} + +// Where appends a list predicates to the UserDelete builder. +func (_d *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{user.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go new file mode 100644 index 00000000..21159a62 --- /dev/null +++ b/backend/ent/user_query.go @@ -0,0 +1,1014 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserQuery is the builder for querying User entities. +type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withAPIKeys *ApiKeyQuery + withRedeemCodes *RedeemCodeQuery + withSubscriptions *UserSubscriptionQuery + withAssignedSubscriptions *UserSubscriptionQuery + withAllowedGroups *GroupQuery + withUserAllowedGroups *UserAllowedGroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (_q *UserQuery) Where(ps ...predicate.User) *UserQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserQuery) Limit(limit int) *UserQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserQuery) Offset(offset int) *UserQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserQuery) Unique(unique bool) *UserQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserQuery) Order(o ...user.OrderOption) *UserQuery { + _q.order = append(_q.order, o...) 
+ return _q +} + +// QueryAPIKeys chains the current query on the "api_keys" edge. +func (_q *UserQuery) QueryAPIKeys() *ApiKeyQuery { + query := (&ApiKeyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.APIKeysTable, user.APIKeysColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryRedeemCodes chains the current query on the "redeem_codes" edge. +func (_q *UserQuery) QueryRedeemCodes() *RedeemCodeQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.RedeemCodesTable, user.RedeemCodesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySubscriptions chains the current query on the "subscriptions" edge. 
+func (_q *UserQuery) QuerySubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.SubscriptionsTable, user.SubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAssignedSubscriptions chains the current query on the "assigned_subscriptions" edge. +func (_q *UserQuery) QueryAssignedSubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AssignedSubscriptionsTable, user.AssignedSubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAllowedGroups chains the current query on the "allowed_groups" edge. 
+func (_q *UserQuery) QueryAllowedGroups() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, user.AllowedGroupsTable, user.AllowedGroupsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge. +func (_q *UserQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.UserColumn), + sqlgraph.Edge(sqlgraph.O2M, true, user.UserAllowedGroupsTable, user.UserAllowedGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (_q *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *UserQuery) FirstX(ctx context.Context) *User { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (_q *UserQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (_q *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserQuery) OnlyX(ctx context.Context) *User { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *UserQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (_q *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (_q *UserQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserQuery) Clone() *UserQuery { + if _q == nil { + return nil + } + return &UserQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]user.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.User{}, _q.predicates...), + withAPIKeys: _q.withAPIKeys.Clone(), + withRedeemCodes: _q.withRedeemCodes.Clone(), + withSubscriptions: _q.withSubscriptions.Clone(), + withAssignedSubscriptions: _q.withAssignedSubscriptions.Clone(), + withAllowedGroups: _q.withAllowedGroups.Clone(), + withUserAllowedGroups: _q.withUserAllowedGroups.Clone(), + // clone intermediate query. 
+ sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAPIKeys tells the query-builder to eager-load the nodes that are connected to +// the "api_keys" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithAPIKeys(opts ...func(*ApiKeyQuery)) *UserQuery { + query := (&ApiKeyClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAPIKeys = query + return _q +} + +// WithRedeemCodes tells the query-builder to eager-load the nodes that are connected to +// the "redeem_codes" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithRedeemCodes(opts ...func(*RedeemCodeQuery)) *UserQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withRedeemCodes = query + return _q +} + +// WithSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "subscriptions" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithSubscriptions(opts ...func(*UserSubscriptionQuery)) *UserQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withSubscriptions = query + return _q +} + +// WithAssignedSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "assigned_subscriptions" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithAssignedSubscriptions(opts ...func(*UserSubscriptionQuery)) *UserQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAssignedSubscriptions = query + return _q +} + +// WithAllowedGroups tells the query-builder to eager-load the nodes that are connected to +// the "allowed_groups" edge. 
The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithAllowedGroups(opts ...func(*GroupQuery)) *UserQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAllowedGroups = query + return _q +} + +// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to +// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *UserQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUserAllowedGroups = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.User.Query(). +// Select(user.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UserQuery) Select(fields ...string) *UserSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &UserSelect{UserQuery: _q} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (_q *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + _spec = _q.querySpec() + loadedTypes = [6]bool{ + _q.withAPIKeys != nil, + _q.withRedeemCodes != nil, + _q.withSubscriptions != nil, + _q.withAssignedSubscriptions != nil, + _q.withAllowedGroups != nil, + _q.withUserAllowedGroups != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAPIKeys; query != nil { + if err := _q.loadAPIKeys(ctx, query, nodes, + func(n *User) { n.Edges.APIKeys = []*ApiKey{} }, + func(n *User, e *ApiKey) { 
n.Edges.APIKeys = append(n.Edges.APIKeys, e) }); err != nil { + return nil, err + } + } + if query := _q.withRedeemCodes; query != nil { + if err := _q.loadRedeemCodes(ctx, query, nodes, + func(n *User) { n.Edges.RedeemCodes = []*RedeemCode{} }, + func(n *User, e *RedeemCode) { n.Edges.RedeemCodes = append(n.Edges.RedeemCodes, e) }); err != nil { + return nil, err + } + } + if query := _q.withSubscriptions; query != nil { + if err := _q.loadSubscriptions(ctx, query, nodes, + func(n *User) { n.Edges.Subscriptions = []*UserSubscription{} }, + func(n *User, e *UserSubscription) { n.Edges.Subscriptions = append(n.Edges.Subscriptions, e) }); err != nil { + return nil, err + } + } + if query := _q.withAssignedSubscriptions; query != nil { + if err := _q.loadAssignedSubscriptions(ctx, query, nodes, + func(n *User) { n.Edges.AssignedSubscriptions = []*UserSubscription{} }, + func(n *User, e *UserSubscription) { + n.Edges.AssignedSubscriptions = append(n.Edges.AssignedSubscriptions, e) + }); err != nil { + return nil, err + } + } + if query := _q.withAllowedGroups; query != nil { + if err := _q.loadAllowedGroups(ctx, query, nodes, + func(n *User) { n.Edges.AllowedGroups = []*Group{} }, + func(n *User, e *Group) { n.Edges.AllowedGroups = append(n.Edges.AllowedGroups, e) }); err != nil { + return nil, err + } + } + if query := _q.withUserAllowedGroups; query != nil { + if err := _q.loadUserAllowedGroups(ctx, query, nodes, + func(n *User) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} }, + func(n *User, e *UserAllowedGroup) { n.Edges.UserAllowedGroups = append(n.Edges.UserAllowedGroups, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserQuery) loadAPIKeys(ctx context.Context, query *ApiKeyQuery, nodes []*User, init func(*User), assign func(*User, *ApiKey)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if 
init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(apikey.FieldUserID) + } + query.Where(predicate.ApiKey(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.APIKeysColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadRedeemCodes(ctx context.Context, query *RedeemCodeQuery, nodes []*User, init func(*User), assign func(*User, *RedeemCode)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(redeemcode.FieldUsedBy) + } + query.Where(predicate.RedeemCode(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.RedeemCodesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UsedBy + if fk == nil { + return fmt.Errorf(`foreign-key "used_by" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "used_by" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*User, init func(*User), assign func(*User, *UserSubscription)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usersubscription.FieldUserID) 
+ } + query.Where(predicate.UserSubscription(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.SubscriptionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadAssignedSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*User, init func(*User), assign func(*User, *UserSubscription)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usersubscription.FieldAssignedBy) + } + query.Where(predicate.UserSubscription(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AssignedSubscriptionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AssignedBy + if fk == nil { + return fmt.Errorf(`foreign-key "assigned_by" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "assigned_by" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadAllowedGroups(ctx context.Context, query *GroupQuery, nodes []*User, init func(*User), assign func(*User, *Group)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*User) + nids := make(map[int64]map[*User]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(user.AllowedGroupsTable) + s.Join(joinT).On(s.C(group.FieldID), 
joinT.C(user.AllowedGroupsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(user.AllowedGroupsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(user.AllowedGroupsPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*User]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowed_groups" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*User, init func(*User), assign func(*User, *UserAllowedGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userallowedgroup.FieldUserID) + } + query.Where(predicate.UserAllowedGroup(func(s *sql.Selector) { + 
s.Where(sql.InValues(s.C(user.UserAllowedGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} + +func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + 
if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, _s.UserQuery, _s, _s.inters, v) +} + +func (_s *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go new file mode 100644 index 00000000..a00f9b8a --- /dev/null +++ b/backend/ent/user_update.go @@ -0,0 +1,1536 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserUpdate builder. +func (_u *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserUpdate) SetUpdatedAt(v time.Time) *UserUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserUpdate) SetDeletedAt(v time.Time) *UserUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserUpdate) SetNillableDeletedAt(v *time.Time) *UserUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserUpdate) ClearDeletedAt() *UserUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetEmail sets the "email" field. +func (_u *UserUpdate) SetEmail(v string) *UserUpdate { + _u.mutation.SetEmail(v) + return _u +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (_u *UserUpdate) SetNillableEmail(v *string) *UserUpdate { + if v != nil { + _u.SetEmail(*v) + } + return _u +} + +// SetPasswordHash sets the "password_hash" field. 
+func (_u *UserUpdate) SetPasswordHash(v string) *UserUpdate { + _u.mutation.SetPasswordHash(v) + return _u +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (_u *UserUpdate) SetNillablePasswordHash(v *string) *UserUpdate { + if v != nil { + _u.SetPasswordHash(*v) + } + return _u +} + +// SetRole sets the "role" field. +func (_u *UserUpdate) SetRole(v string) *UserUpdate { + _u.mutation.SetRole(v) + return _u +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (_u *UserUpdate) SetNillableRole(v *string) *UserUpdate { + if v != nil { + _u.SetRole(*v) + } + return _u +} + +// SetBalance sets the "balance" field. +func (_u *UserUpdate) SetBalance(v float64) *UserUpdate { + _u.mutation.ResetBalance() + _u.mutation.SetBalance(v) + return _u +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_u *UserUpdate) SetNillableBalance(v *float64) *UserUpdate { + if v != nil { + _u.SetBalance(*v) + } + return _u +} + +// AddBalance adds value to the "balance" field. +func (_u *UserUpdate) AddBalance(v float64) *UserUpdate { + _u.mutation.AddBalance(v) + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *UserUpdate) SetConcurrency(v int) *UserUpdate { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *UserUpdate) SetNillableConcurrency(v *int) *UserUpdate { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *UserUpdate) AddConcurrency(v int) *UserUpdate { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserUpdate) SetStatus(v string) *UserUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. 
+func (_u *UserUpdate) SetNillableStatus(v *string) *UserUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsername sets the "username" field. +func (_u *UserUpdate) SetUsername(v string) *UserUpdate { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *UserUpdate) SetNillableUsername(v *string) *UserUpdate { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// SetWechat sets the "wechat" field. +func (_u *UserUpdate) SetWechat(v string) *UserUpdate { + _u.mutation.SetWechat(v) + return _u +} + +// SetNillableWechat sets the "wechat" field if the given value is not nil. +func (_u *UserUpdate) SetNillableWechat(v *string) *UserUpdate { + if v != nil { + _u.SetWechat(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserUpdate) SetNotes(v string) *UserUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserUpdate) SetNillableNotes(v *string) *UserUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs. +func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the ApiKey entity. +func (_u *UserUpdate) AddAPIKeys(v ...*ApiKey) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *UserUpdate) AddRedeemCodeIDs(ids ...int64) *UserUpdate { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. 
+func (_u *UserUpdate) AddRedeemCodes(v ...*RedeemCode) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdate) AddSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.AddSubscriptionIDs(ids...) + return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) AddSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdate) AddAssignedSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAssignedSubscriptionIDs(ids...) + return _u +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_u *UserUpdate) AddAllowedGroupIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAllowedGroupIDs(ids...) + return _u +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_u *UserUpdate) AddAllowedGroups(v ...*Group) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedGroupIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_u *UserUpdate) Mutation() *UserMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the ApiKey entity. 
+func (_u *UserUpdate) ClearAPIKeys() *UserUpdate { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to ApiKey entities by IDs. +func (_u *UserUpdate) RemoveAPIKeyIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to ApiKey entities. +func (_u *UserUpdate) RemoveAPIKeys(v ...*ApiKey) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdate) ClearRedeemCodes() *UserUpdate { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. +func (_u *UserUpdate) RemoveRedeemCodeIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. +func (_u *UserUpdate) RemoveRedeemCodes(v ...*RedeemCode) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) ClearSubscriptions() *UserUpdate { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdate) RemoveSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *UserUpdate) RemoveSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) 
+} + +// ClearAssignedSubscriptions clears all "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) ClearAssignedSubscriptions() *UserUpdate { + _u.mutation.ClearAssignedSubscriptions() + return _u +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdate) RemoveAssignedSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAssignedSubscriptionIDs(ids...) + return _u +} + +// RemoveAssignedSubscriptions removes "assigned_subscriptions" edges to UserSubscription entities. +func (_u *UserUpdate) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAssignedSubscriptionIDs(ids...) +} + +// ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. +func (_u *UserUpdate) ClearAllowedGroups() *UserUpdate { + _u.mutation.ClearAllowedGroups() + return _u +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to Group entities by IDs. +func (_u *UserUpdate) RemoveAllowedGroupIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAllowedGroupIDs(ids...) + return _u +} + +// RemoveAllowedGroups removes "allowed_groups" edges to Group entities. +func (_u *UserUpdate) RemoveAllowedGroups(v ...*Group) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedGroupIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *UserUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if user.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized user.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := user.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserUpdate) check() error { + if v, ok := _u.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := _u.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := _u.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed 
for field "User.username": %w`, err)} + } + } + if v, ok := _u.mutation.Wechat(); ok { + if err := user.WechatValidator(v); err != nil { + return &ValidationError{Name: "wechat", err: fmt.Errorf(`ent: validator failed for field "User.wechat": %w`, err)} + } + } + return nil +} + +func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(user.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := _u.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := _u.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + } + if value, ok := _u.mutation.Balance(); ok { + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBalance(); ok { + _spec.AddField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, 
field.TypeString, value) + } + if value, ok := _u.mutation.Wechat(); ok { + _spec.SetField(user.FieldWechat, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(user.FieldNotes, field.TypeString, value) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && 
!_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, 
+ Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAssignedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + 
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedGroupsIDs(); len(nodes) > 0 && !_u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedGroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = 
&ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserUpdateOne) SetUpdatedAt(v time.Time) *UserUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserUpdateOne) SetDeletedAt(v time.Time) *UserUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableDeletedAt(v *time.Time) *UserUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserUpdateOne) ClearDeletedAt() *UserUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetEmail sets the "email" field. +func (_u *UserUpdateOne) SetEmail(v string) *UserUpdateOne { + _u.mutation.SetEmail(v) + return _u +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableEmail(v *string) *UserUpdateOne { + if v != nil { + _u.SetEmail(*v) + } + return _u +} + +// SetPasswordHash sets the "password_hash" field. +func (_u *UserUpdateOne) SetPasswordHash(v string) *UserUpdateOne { + _u.mutation.SetPasswordHash(v) + return _u +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillablePasswordHash(v *string) *UserUpdateOne { + if v != nil { + _u.SetPasswordHash(*v) + } + return _u +} + +// SetRole sets the "role" field. +func (_u *UserUpdateOne) SetRole(v string) *UserUpdateOne { + _u.mutation.SetRole(v) + return _u +} + +// SetNillableRole sets the "role" field if the given value is not nil. 
+func (_u *UserUpdateOne) SetNillableRole(v *string) *UserUpdateOne { + if v != nil { + _u.SetRole(*v) + } + return _u +} + +// SetBalance sets the "balance" field. +func (_u *UserUpdateOne) SetBalance(v float64) *UserUpdateOne { + _u.mutation.ResetBalance() + _u.mutation.SetBalance(v) + return _u +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableBalance(v *float64) *UserUpdateOne { + if v != nil { + _u.SetBalance(*v) + } + return _u +} + +// AddBalance adds value to the "balance" field. +func (_u *UserUpdateOne) AddBalance(v float64) *UserUpdateOne { + _u.mutation.AddBalance(v) + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *UserUpdateOne) SetConcurrency(v int) *UserUpdateOne { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableConcurrency(v *int) *UserUpdateOne { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *UserUpdateOne) AddConcurrency(v int) *UserUpdateOne { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserUpdateOne) SetStatus(v string) *UserUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableStatus(v *string) *UserUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsername sets the "username" field. +func (_u *UserUpdateOne) SetUsername(v string) *UserUpdateOne { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. 
+func (_u *UserUpdateOne) SetNillableUsername(v *string) *UserUpdateOne { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// SetWechat sets the "wechat" field. +func (_u *UserUpdateOne) SetWechat(v string) *UserUpdateOne { + _u.mutation.SetWechat(v) + return _u +} + +// SetNillableWechat sets the "wechat" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableWechat(v *string) *UserUpdateOne { + if v != nil { + _u.SetWechat(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserUpdateOne) SetNotes(v string) *UserUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableNotes(v *string) *UserUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the ApiKey entity by IDs. +func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the ApiKey entity. +func (_u *UserUpdateOne) AddAPIKeys(v ...*ApiKey) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *UserUpdateOne) AddRedeemCodeIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdateOne) AddRedeemCodes(v ...*RedeemCode) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdateOne) AddSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddSubscriptionIDs(ids...) 
+ return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) AddSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdateOne) AddAssignedSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAssignedSubscriptionIDs(ids...) + return _u +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_u *UserUpdateOne) AddAllowedGroupIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAllowedGroupIDs(ids...) + return _u +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_u *UserUpdateOne) AddAllowedGroups(v ...*Group) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedGroupIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_u *UserUpdateOne) Mutation() *UserMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the ApiKey entity. +func (_u *UserUpdateOne) ClearAPIKeys() *UserUpdateOne { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to ApiKey entities by IDs. +func (_u *UserUpdateOne) RemoveAPIKeyIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to ApiKey entities. 
+func (_u *UserUpdateOne) RemoveAPIKeys(v ...*ApiKey) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdateOne) ClearRedeemCodes() *UserUpdateOne { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. +func (_u *UserUpdateOne) RemoveRedeemCodeIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. +func (_u *UserUpdateOne) RemoveRedeemCodes(v ...*RedeemCode) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) ClearSubscriptions() *UserUpdateOne { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdateOne) RemoveSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *UserUpdateOne) RemoveSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) +} + +// ClearAssignedSubscriptions clears all "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) ClearAssignedSubscriptions() *UserUpdateOne { + _u.mutation.ClearAssignedSubscriptions() + return _u +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to UserSubscription entities by IDs. 
+func (_u *UserUpdateOne) RemoveAssignedSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAssignedSubscriptionIDs(ids...) + return _u +} + +// RemoveAssignedSubscriptions removes "assigned_subscriptions" edges to UserSubscription entities. +func (_u *UserUpdateOne) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAssignedSubscriptionIDs(ids...) +} + +// ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. +func (_u *UserUpdateOne) ClearAllowedGroups() *UserUpdateOne { + _u.mutation.ClearAllowedGroups() + return _u +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to Group entities by IDs. +func (_u *UserUpdateOne) RemoveAllowedGroupIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAllowedGroupIDs(ids...) + return _u +} + +// RemoveAllowedGroups removes "allowed_groups" edges to Group entities. +func (_u *UserUpdateOne) RemoveAllowedGroups(v ...*Group) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedGroupIDs(ids...) +} + +// Where appends a list predicates to the UserUpdate builder. +func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated User entity. +func (_u *UserUpdateOne) Save(ctx context.Context) (*User, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if user.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized user.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := user.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserUpdateOne) check() error { + if v, ok := _u.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := _u.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := _u.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: 
fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if v, ok := _u.mutation.Wechat(); ok { + if err := user.WechatValidator(v); err != nil { + return &ValidationError{Name: "wechat", err: fmt.Errorf(`ent: validator failed for field "User.wechat": %w`, err)} + } + } + return nil +} + +func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(user.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := _u.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := _u.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + } + if value, ok := _u.mutation.Balance(); ok 
{ + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBalance(); ok { + _spec.AddField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + } + if value, ok := _u.mutation.Wechat(); ok { + _spec.SetField(user.FieldWechat, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(user.FieldNotes, field.TypeString, value) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); 
len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAssignedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 
{ + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedGroupsIDs(); len(nodes) > 0 && !_u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedGroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &User{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/userallowedgroup.go b/backend/ent/userallowedgroup.go new file mode 100644 index 00000000..93cbd374 --- /dev/null +++ b/backend/ent/userallowedgroup.go @@ -0,0 +1,165 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroup is the model entity for the UserAllowedGroup schema. +type UserAllowedGroup struct { + config `json:"-"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAllowedGroupQuery when eager-loading is set. 
+ Edges UserAllowedGroupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAllowedGroupEdges holds the relations/edges for other nodes in the graph. +type UserAllowedGroupEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAllowedGroupEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAllowedGroupEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UserAllowedGroup) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userallowedgroup.FieldUserID, userallowedgroup.FieldGroupID: + values[i] = new(sql.NullInt64) + case userallowedgroup.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAllowedGroup fields. 
+func (_m *UserAllowedGroup) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userallowedgroup.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case userallowedgroup.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case userallowedgroup.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserAllowedGroup. +// This includes values selected through modifiers, order, etc. +func (_m *UserAllowedGroup) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserAllowedGroup entity. +func (_m *UserAllowedGroup) QueryUser() *UserQuery { + return NewUserAllowedGroupClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the UserAllowedGroup entity. +func (_m *UserAllowedGroup) QueryGroup() *GroupQuery { + return NewUserAllowedGroupClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this UserAllowedGroup. +// Note that you need to call UserAllowedGroup.Unwrap() before calling this method if this UserAllowedGroup +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (_m *UserAllowedGroup) Update() *UserAllowedGroupUpdateOne { + return NewUserAllowedGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAllowedGroup entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserAllowedGroup) Unwrap() *UserAllowedGroup { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAllowedGroup is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserAllowedGroup) String() string { + var builder strings.Builder + builder.WriteString("UserAllowedGroup(") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// UserAllowedGroups is a parsable slice of UserAllowedGroup. +type UserAllowedGroups []*UserAllowedGroup diff --git a/backend/ent/userallowedgroup/userallowedgroup.go b/backend/ent/userallowedgroup/userallowedgroup.go new file mode 100644 index 00000000..56d604c8 --- /dev/null +++ b/backend/ent/userallowedgroup/userallowedgroup.go @@ -0,0 +1,113 @@ +// Code generated by ent, DO NOT EDIT. + +package userallowedgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userallowedgroup type in the database. + Label = "user_allowed_group" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldGroupID holds the string denoting the group_id field in the database. 
+ FieldGroupID = "group_id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // UserFieldID holds the string denoting the ID field of the User. + UserFieldID = "id" + // GroupFieldID holds the string denoting the ID field of the Group. + GroupFieldID = "id" + // Table holds the table name of the userallowedgroup in the database. + Table = "user_allowed_groups" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_allowed_groups" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "user_allowed_groups" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for userallowedgroup fields. +var Columns = []string{ + FieldUserID, + FieldGroupID, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the UserAllowedGroup queries. 
+type OrderOption func(*sql.Selector) + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, UserColumn), + sqlgraph.To(UserInverseTable, UserFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.To(GroupInverseTable, GroupFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/userallowedgroup/where.go b/backend/ent/userallowedgroup/where.go new file mode 100644 index 00000000..0951201b --- /dev/null +++ b/backend/ent/userallowedgroup/where.go @@ -0,0 +1,167 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package userallowedgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldUserID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNotIn(FieldUserID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. 
+func GroupIDNEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
+func CreatedAtLT(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, UserColumn), + sqlgraph.Edge(sqlgraph.M2O, false, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
+func And(predicates ...predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.NotPredicates(p)) +} diff --git a/backend/ent/userallowedgroup_create.go b/backend/ent/userallowedgroup_create.go new file mode 100644 index 00000000..2b04a757 --- /dev/null +++ b/backend/ent/userallowedgroup_create.go @@ -0,0 +1,568 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupCreate is the builder for creating a UserAllowedGroup entity. +type UserAllowedGroupCreate struct { + config + mutation *UserAllowedGroupMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetUserID sets the "user_id" field. +func (_c *UserAllowedGroupCreate) SetUserID(v int64) *UserAllowedGroupCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *UserAllowedGroupCreate) SetGroupID(v int64) *UserAllowedGroupCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserAllowedGroupCreate) SetCreatedAt(v time.Time) *UserAllowedGroupCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
+func (_c *UserAllowedGroupCreate) SetNillableCreatedAt(v *time.Time) *UserAllowedGroupCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UserAllowedGroupCreate) SetUser(v *User) *UserAllowedGroupCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *UserAllowedGroupCreate) SetGroup(v *Group) *UserAllowedGroupCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. +func (_c *UserAllowedGroupCreate) Mutation() *UserAllowedGroupMutation { + return _c.mutation +} + +// Save creates the UserAllowedGroup in the database. +func (_c *UserAllowedGroupCreate) Save(ctx context.Context) (*UserAllowedGroup, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAllowedGroupCreate) SaveX(ctx context.Context) *UserAllowedGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAllowedGroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAllowedGroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAllowedGroupCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := userallowedgroup.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UserAllowedGroupCreate) check() error { + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserAllowedGroup.user_id"`)} + } + if _, ok := _c.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "UserAllowedGroup.group_id"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAllowedGroup.created_at"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserAllowedGroup.user"`)} + } + if len(_c.mutation.GroupIDs()) == 0 { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "UserAllowedGroup.group"`)} + } + return nil +} + +func (_c *UserAllowedGroupCreate) sqlSave(ctx context.Context) (*UserAllowedGroup, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} + +func (_c *UserAllowedGroupCreate) createSpec() (*UserAllowedGroup, *sqlgraph.CreateSpec) { + var ( + _node = &UserAllowedGroup{config: _c.config} + _spec = sqlgraph.NewCreateSpec(userallowedgroup.Table, nil) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(userallowedgroup.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, 
field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAllowedGroup.Create(). +// SetUserID(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAllowedGroupUpsert) { +// SetUserID(v+v). +// }). +// Exec(ctx) +func (_c *UserAllowedGroupCreate) OnConflict(opts ...sql.ConflictOption) *UserAllowedGroupUpsertOne { + _c.conflict = opts + return &UserAllowedGroupUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAllowedGroupCreate) OnConflictColumns(columns ...string) *UserAllowedGroupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAllowedGroupUpsertOne{ + create: _c, + } +} + +type ( + // UserAllowedGroupUpsertOne is the builder for "upsert"-ing + // one UserAllowedGroup node. 
+ UserAllowedGroupUpsertOne struct { + create *UserAllowedGroupCreate + } + + // UserAllowedGroupUpsert is the "OnConflict" setter. + UserAllowedGroupUpsert struct { + *sql.UpdateSet + } +) + +// SetUserID sets the "user_id" field. +func (u *UserAllowedGroupUpsert) SetUserID(v int64) *UserAllowedGroupUpsert { + u.Set(userallowedgroup.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsert) UpdateUserID() *UserAllowedGroupUpsert { + u.SetExcluded(userallowedgroup.FieldUserID) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *UserAllowedGroupUpsert) SetGroupID(v int64) *UserAllowedGroupUpsert { + u.Set(userallowedgroup.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsert) UpdateGroupID() *UserAllowedGroupUpsert { + u.SetExcluded(userallowedgroup.FieldGroupID) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAllowedGroupUpsertOne) UpdateNewValues() *UserAllowedGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(userallowedgroup.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). 
+// Exec(ctx) +func (u *UserAllowedGroupUpsertOne) Ignore() *UserAllowedGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAllowedGroupUpsertOne) DoNothing() *UserAllowedGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAllowedGroupCreate.OnConflict +// documentation for more info. +func (u *UserAllowedGroupUpsertOne) Update(set func(*UserAllowedGroupUpsert)) *UserAllowedGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAllowedGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UserAllowedGroupUpsertOne) SetUserID(v int64) *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertOne) UpdateUserID() *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserAllowedGroupUpsertOne) SetGroupID(v int64) *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertOne) UpdateGroupID() *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateGroupID() + }) +} + +// Exec executes the query. 
+func (u *UserAllowedGroupUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAllowedGroupCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAllowedGroupUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// UserAllowedGroupCreateBulk is the builder for creating many UserAllowedGroup entities in bulk. +type UserAllowedGroupCreateBulk struct { + config + err error + builders []*UserAllowedGroupCreate + conflict []sql.ConflictOption +} + +// Save creates the UserAllowedGroup entities in the database. +func (_c *UserAllowedGroupCreateBulk) Save(ctx context.Context) ([]*UserAllowedGroup, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserAllowedGroup, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserAllowedGroupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserAllowedGroupCreateBulk) SaveX(ctx context.Context) []*UserAllowedGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAllowedGroupCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAllowedGroupCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAllowedGroup.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserAllowedGroupUpsert) { +// SetUserID(v+v). +// }). +// Exec(ctx) +func (_c *UserAllowedGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAllowedGroupUpsertBulk { + _c.conflict = opts + return &UserAllowedGroupUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (_c *UserAllowedGroupCreateBulk) OnConflictColumns(columns ...string) *UserAllowedGroupUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAllowedGroupUpsertBulk{ + create: _c, + } +} + +// UserAllowedGroupUpsertBulk is the builder for "upsert"-ing +// a bulk of UserAllowedGroup nodes. +type UserAllowedGroupUpsertBulk struct { + create *UserAllowedGroupCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAllowedGroupUpsertBulk) UpdateNewValues() *UserAllowedGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(userallowedgroup.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAllowedGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAllowedGroupUpsertBulk) Ignore() *UserAllowedGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAllowedGroupUpsertBulk) DoNothing() *UserAllowedGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAllowedGroupCreateBulk.OnConflict +// documentation for more info. 
+func (u *UserAllowedGroupUpsertBulk) Update(set func(*UserAllowedGroupUpsert)) *UserAllowedGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAllowedGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UserAllowedGroupUpsertBulk) SetUserID(v int64) *UserAllowedGroupUpsertBulk { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertBulk) UpdateUserID() *UserAllowedGroupUpsertBulk { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserAllowedGroupUpsertBulk) SetGroupID(v int64) *UserAllowedGroupUpsertBulk { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertBulk) UpdateGroupID() *UserAllowedGroupUpsertBulk { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateGroupID() + }) +} + +// Exec executes the query. +func (u *UserAllowedGroupUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAllowedGroupCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAllowedGroupCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *UserAllowedGroupUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userallowedgroup_delete.go b/backend/ent/userallowedgroup_delete.go new file mode 100644 index 00000000..e366ea97 --- /dev/null +++ b/backend/ent/userallowedgroup_delete.go @@ -0,0 +1,87 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupDelete is the builder for deleting a UserAllowedGroup entity. +type UserAllowedGroupDelete struct { + config + hooks []Hook + mutation *UserAllowedGroupMutation +} + +// Where appends a list predicates to the UserAllowedGroupDelete builder. +func (_d *UserAllowedGroupDelete) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UserAllowedGroupDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *UserAllowedGroupDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserAllowedGroupDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(userallowedgroup.Table, nil) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserAllowedGroupDeleteOne is the builder for deleting a single UserAllowedGroup entity. +type UserAllowedGroupDeleteOne struct { + _d *UserAllowedGroupDelete +} + +// Where appends a list predicates to the UserAllowedGroupDelete builder. +func (_d *UserAllowedGroupDeleteOne) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserAllowedGroupDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{userallowedgroup.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserAllowedGroupDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userallowedgroup_query.go b/backend/ent/userallowedgroup_query.go new file mode 100644 index 00000000..da2c19a7 --- /dev/null +++ b/backend/ent/userallowedgroup_query.go @@ -0,0 +1,603 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupQuery is the builder for querying UserAllowedGroup entities. +type UserAllowedGroupQuery struct { + config + ctx *QueryContext + order []userallowedgroup.OrderOption + inters []Interceptor + predicates []predicate.UserAllowedGroup + withUser *UserQuery + withGroup *GroupQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserAllowedGroupQuery builder. +func (_q *UserAllowedGroupQuery) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserAllowedGroupQuery) Limit(limit int) *UserAllowedGroupQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserAllowedGroupQuery) Offset(offset int) *UserAllowedGroupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserAllowedGroupQuery) Unique(unique bool) *UserAllowedGroupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserAllowedGroupQuery) Order(o ...userallowedgroup.OrderOption) *UserAllowedGroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. 
+func (_q *UserAllowedGroupQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userallowedgroup.Table, userallowedgroup.UserColumn, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, userallowedgroup.UserTable, userallowedgroup.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *UserAllowedGroupQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userallowedgroup.Table, userallowedgroup.GroupColumn, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, userallowedgroup.GroupTable, userallowedgroup.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAllowedGroup entity from the query. +// Returns a *NotFoundError when no UserAllowedGroup was found. +func (_q *UserAllowedGroupQuery) First(ctx context.Context) (*UserAllowedGroup, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userallowedgroup.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *UserAllowedGroupQuery) FirstX(ctx context.Context) *UserAllowedGroup { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// Only returns a single UserAllowedGroup entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserAllowedGroup entity is found. +// Returns a *NotFoundError when no UserAllowedGroup entities are found. +func (_q *UserAllowedGroupQuery) Only(ctx context.Context) (*UserAllowedGroup, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userallowedgroup.Label} + default: + return nil, &NotSingularError{userallowedgroup.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) OnlyX(ctx context.Context) *UserAllowedGroup { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// All executes the query and returns a list of UserAllowedGroups. +func (_q *UserAllowedGroupQuery) All(ctx context.Context) ([]*UserAllowedGroup, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAllowedGroup, *UserAllowedGroupQuery]() + return withInterceptors[[]*UserAllowedGroup](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) AllX(ctx context.Context) []*UserAllowedGroup { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// Count returns the count of the given query. 
+func (_q *UserAllowedGroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAllowedGroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserAllowedGroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.First(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserAllowedGroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserAllowedGroupQuery) Clone() *UserAllowedGroupQuery { + if _q == nil { + return nil + } + return &UserAllowedGroupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]userallowedgroup.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserAllowedGroup{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *UserAllowedGroupQuery) WithUser(opts ...func(*UserQuery)) *UserAllowedGroupQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAllowedGroupQuery) WithGroup(opts ...func(*GroupQuery)) *UserAllowedGroupQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// UserID int64 `json:"user_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserAllowedGroup.Query(). +// GroupBy(userallowedgroup.FieldUserID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserAllowedGroupQuery) GroupBy(field string, fields ...string) *UserAllowedGroupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserAllowedGroupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = userallowedgroup.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// UserID int64 `json:"user_id,omitempty"` +// } +// +// client.UserAllowedGroup.Query(). +// Select(userallowedgroup.FieldUserID). +// Scan(ctx, &v) +func (_q *UserAllowedGroupQuery) Select(fields ...string) *UserAllowedGroupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &UserAllowedGroupSelect{UserAllowedGroupQuery: _q} + sbuild.label = userallowedgroup.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserAllowedGroupSelect configured with the given aggregations. +func (_q *UserAllowedGroupQuery) Aggregate(fns ...AggregateFunc) *UserAllowedGroupSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAllowedGroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userallowedgroup.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAllowedGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAllowedGroup, error) { + var ( + nodes = []*UserAllowedGroup{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAllowedGroup).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAllowedGroup{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserAllowedGroup, e *User) { n.Edges.User = e }); err != nil { + return 
nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *UserAllowedGroup, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAllowedGroupQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserAllowedGroup, init func(*UserAllowedGroup), assign func(*UserAllowedGroup, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAllowedGroup) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserAllowedGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*UserAllowedGroup, init func(*UserAllowedGroup), assign func(*UserAllowedGroup, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAllowedGroup) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *UserAllowedGroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Unique = false + _spec.Node.Columns = nil + return 
sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserAllowedGroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(userallowedgroup.Table, userallowedgroup.Columns, nil) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + for i := range fields { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(userallowedgroup.FieldUserID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(userallowedgroup.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(userallowedgroup.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = userallowedgroup.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserAllowedGroupGroupBy is the group-by builder for UserAllowedGroup entities. +type UserAllowedGroupGroupBy struct { + selector + build *UserAllowedGroupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserAllowedGroupGroupBy) Aggregate(fns ...AggregateFunc) *UserAllowedGroupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserAllowedGroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAllowedGroupQuery, *UserAllowedGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAllowedGroupGroupBy) sqlScan(ctx context.Context, root *UserAllowedGroupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAllowedGroupSelect is the builder for selecting fields of UserAllowedGroup entities. 
+type UserAllowedGroupSelect struct { + *UserAllowedGroupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAllowedGroupSelect) Aggregate(fns ...AggregateFunc) *UserAllowedGroupSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserAllowedGroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAllowedGroupQuery, *UserAllowedGroupSelect](ctx, _s.UserAllowedGroupQuery, _s, _s.inters, v) +} + +func (_s *UserAllowedGroupSelect) sqlScan(ctx context.Context, root *UserAllowedGroupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/userallowedgroup_update.go b/backend/ent/userallowedgroup_update.go new file mode 100644 index 00000000..27071b18 --- /dev/null +++ b/backend/ent/userallowedgroup_update.go @@ -0,0 +1,423 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupUpdate is the builder for updating UserAllowedGroup entities. +type UserAllowedGroupUpdate struct { + config + hooks []Hook + mutation *UserAllowedGroupMutation +} + +// Where appends a list predicates to the UserAllowedGroupUpdate builder. +func (_u *UserAllowedGroupUpdate) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserAllowedGroupUpdate) SetUserID(v int64) *UserAllowedGroupUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdate) SetNillableUserID(v *int64) *UserAllowedGroupUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserAllowedGroupUpdate) SetGroupID(v int64) *UserAllowedGroupUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdate) SetNillableGroupID(v *int64) *UserAllowedGroupUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdate) SetUser(v *User) *UserAllowedGroupUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdate) SetGroup(v *Group) *UserAllowedGroupUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. 
+func (_u *UserAllowedGroupUpdate) Mutation() *UserAllowedGroupMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdate) ClearUser() *UserAllowedGroupUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdate) ClearGroup() *UserAllowedGroupUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserAllowedGroupUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAllowedGroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAllowedGroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAllowedGroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserAllowedGroupUpdate) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.group"`) + } + return nil +} + +func (_u *UserAllowedGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userallowedgroup.Table, userallowedgroup.Columns, sqlgraph.NewFieldSpec(userallowedgroup.FieldUserID, field.TypeInt64), sqlgraph.NewFieldSpec(userallowedgroup.FieldGroupID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userallowedgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAllowedGroupUpdateOne is the builder for updating a single UserAllowedGroup entity. +type UserAllowedGroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAllowedGroupMutation +} + +// SetUserID sets the "user_id" field. +func (_u *UserAllowedGroupUpdateOne) SetUserID(v int64) *UserAllowedGroupUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdateOne) SetNillableUserID(v *int64) *UserAllowedGroupUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserAllowedGroupUpdateOne) SetGroupID(v int64) *UserAllowedGroupUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. 
+func (_u *UserAllowedGroupUpdateOne) SetNillableGroupID(v *int64) *UserAllowedGroupUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdateOne) SetUser(v *User) *UserAllowedGroupUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdateOne) SetGroup(v *Group) *UserAllowedGroupUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. +func (_u *UserAllowedGroupUpdateOne) Mutation() *UserAllowedGroupMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdateOne) ClearUser() *UserAllowedGroupUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdateOne) ClearGroup() *UserAllowedGroupUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the UserAllowedGroupUpdate builder. +func (_u *UserAllowedGroupUpdateOne) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserAllowedGroupUpdateOne) Select(field string, fields ...string) *UserAllowedGroupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserAllowedGroup entity. +func (_u *UserAllowedGroupUpdateOne) Save(ctx context.Context) (*UserAllowedGroup, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *UserAllowedGroupUpdateOne) SaveX(ctx context.Context) *UserAllowedGroup { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAllowedGroupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAllowedGroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAllowedGroupUpdateOne) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.group"`) + } + return nil +} + +func (_u *UserAllowedGroupUpdateOne) sqlSave(ctx context.Context) (_node *UserAllowedGroup, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userallowedgroup.Table, userallowedgroup.Columns, sqlgraph.NewFieldSpec(userallowedgroup.FieldUserID, field.TypeInt64), sqlgraph.NewFieldSpec(userallowedgroup.FieldGroupID, field.TypeInt64)) + if id, ok := _u.mutation.UserID(); !ok { + return nil, &ValidationError{Name: "user_id", err: errors.New(`ent: missing "UserAllowedGroup.user_id" for update`)} + } else { + _spec.Node.CompositeID[0].Value = id + } + if id, ok := _u.mutation.GroupID(); !ok { + return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "UserAllowedGroup.group_id" for update`)} + } else { + _spec.Node.CompositeID[1].Value = id + } + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, len(fields)) + for i, f := range fields { + if !userallowedgroup.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: 
fmt.Errorf("ent: invalid field %q for query", f)} + } + _spec.Node.Columns[i] = f + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserAllowedGroup{config: _u.config} + _spec.Assign = 
_node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userallowedgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/usersubscription.go b/backend/ent/usersubscription.go new file mode 100644 index 00000000..3cfe9475 --- /dev/null +++ b/backend/ent/usersubscription.go @@ -0,0 +1,354 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscription is the model entity for the UserSubscription schema. +type UserSubscription struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // StartsAt holds the value of the "starts_at" field. + StartsAt time.Time `json:"starts_at,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // DailyWindowStart holds the value of the "daily_window_start" field. + DailyWindowStart *time.Time `json:"daily_window_start,omitempty"` + // WeeklyWindowStart holds the value of the "weekly_window_start" field. 
+ WeeklyWindowStart *time.Time `json:"weekly_window_start,omitempty"` + // MonthlyWindowStart holds the value of the "monthly_window_start" field. + MonthlyWindowStart *time.Time `json:"monthly_window_start,omitempty"` + // DailyUsageUsd holds the value of the "daily_usage_usd" field. + DailyUsageUsd float64 `json:"daily_usage_usd,omitempty"` + // WeeklyUsageUsd holds the value of the "weekly_usage_usd" field. + WeeklyUsageUsd float64 `json:"weekly_usage_usd,omitempty"` + // MonthlyUsageUsd holds the value of the "monthly_usage_usd" field. + MonthlyUsageUsd float64 `json:"monthly_usage_usd,omitempty"` + // AssignedBy holds the value of the "assigned_by" field. + AssignedBy *int64 `json:"assigned_by,omitempty"` + // AssignedAt holds the value of the "assigned_at" field. + AssignedAt time.Time `json:"assigned_at,omitempty"` + // Notes holds the value of the "notes" field. + Notes *string `json:"notes,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserSubscriptionQuery when eager-loading is set. + Edges UserSubscriptionEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserSubscriptionEdges holds the relations/edges for other nodes in the graph. +type UserSubscriptionEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // AssignedByUser holds the value of the assigned_by_user edge. + AssignedByUser *User `json:"assigned_by_user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
+func (e UserSubscriptionEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserSubscriptionEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// AssignedByUserOrErr returns the AssignedByUser value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserSubscriptionEdges) AssignedByUserOrErr() (*User, error) { + if e.AssignedByUser != nil { + return e.AssignedByUser, nil + } else if e.loadedTypes[2] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "assigned_by_user"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*UserSubscription) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case usersubscription.FieldDailyUsageUsd, usersubscription.FieldWeeklyUsageUsd, usersubscription.FieldMonthlyUsageUsd: + values[i] = new(sql.NullFloat64) + case usersubscription.FieldID, usersubscription.FieldUserID, usersubscription.FieldGroupID, usersubscription.FieldAssignedBy: + values[i] = new(sql.NullInt64) + case usersubscription.FieldStatus, usersubscription.FieldNotes: + values[i] = new(sql.NullString) + case usersubscription.FieldCreatedAt, usersubscription.FieldUpdatedAt, usersubscription.FieldStartsAt, usersubscription.FieldExpiresAt, usersubscription.FieldDailyWindowStart, usersubscription.FieldWeeklyWindowStart, usersubscription.FieldMonthlyWindowStart, usersubscription.FieldAssignedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserSubscription fields. 
+func (_m *UserSubscription) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case usersubscription.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case usersubscription.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case usersubscription.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case usersubscription.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case usersubscription.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case usersubscription.FieldStartsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field starts_at", values[i]) + } else if value.Valid { + _m.StartsAt = value.Time + } + case usersubscription.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + _m.ExpiresAt = value.Time + } + case usersubscription.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case 
usersubscription.FieldDailyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field daily_window_start", values[i]) + } else if value.Valid { + _m.DailyWindowStart = new(time.Time) + *_m.DailyWindowStart = value.Time + } + case usersubscription.FieldWeeklyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field weekly_window_start", values[i]) + } else if value.Valid { + _m.WeeklyWindowStart = new(time.Time) + *_m.WeeklyWindowStart = value.Time + } + case usersubscription.FieldMonthlyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field monthly_window_start", values[i]) + } else if value.Valid { + _m.MonthlyWindowStart = new(time.Time) + *_m.MonthlyWindowStart = value.Time + } + case usersubscription.FieldDailyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field daily_usage_usd", values[i]) + } else if value.Valid { + _m.DailyUsageUsd = value.Float64 + } + case usersubscription.FieldWeeklyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field weekly_usage_usd", values[i]) + } else if value.Valid { + _m.WeeklyUsageUsd = value.Float64 + } + case usersubscription.FieldMonthlyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field monthly_usage_usd", values[i]) + } else if value.Valid { + _m.MonthlyUsageUsd = value.Float64 + } + case usersubscription.FieldAssignedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field assigned_by", values[i]) + } else if value.Valid { + _m.AssignedBy = new(int64) + *_m.AssignedBy = value.Int64 + } + case usersubscription.FieldAssignedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field 
assigned_at", values[i]) + } else if value.Valid { + _m.AssignedAt = value.Time + } + case usersubscription.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = new(string) + *_m.Notes = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserSubscription. +// This includes values selected through modifiers, order, etc. +func (_m *UserSubscription) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryUser() *UserQuery { + return NewUserSubscriptionClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryGroup() *GroupQuery { + return NewUserSubscriptionClient(_m.config).QueryGroup(_m) +} + +// QueryAssignedByUser queries the "assigned_by_user" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryAssignedByUser() *UserQuery { + return NewUserSubscriptionClient(_m.config).QueryAssignedByUser(_m) +} + +// Update returns a builder for updating this UserSubscription. +// Note that you need to call UserSubscription.Unwrap() before calling this method if this UserSubscription +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserSubscription) Update() *UserSubscriptionUpdateOne { + return NewUserSubscriptionClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserSubscription entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (_m *UserSubscription) Unwrap() *UserSubscription { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserSubscription is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserSubscription) String() string { + var builder strings.Builder + builder.WriteString("UserSubscription(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("starts_at=") + builder.WriteString(_m.StartsAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(_m.ExpiresAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.DailyWindowStart; v != nil { + builder.WriteString("daily_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.WeeklyWindowStart; v != nil { + builder.WriteString("weekly_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.MonthlyWindowStart; v != nil { + builder.WriteString("monthly_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("daily_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.DailyUsageUsd)) + builder.WriteString(", ") + builder.WriteString("weekly_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.WeeklyUsageUsd)) + builder.WriteString(", ") + 
builder.WriteString("monthly_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.MonthlyUsageUsd)) + builder.WriteString(", ") + if v := _m.AssignedBy; v != nil { + builder.WriteString("assigned_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("assigned_at=") + builder.WriteString(_m.AssignedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.Notes; v != nil { + builder.WriteString("notes=") + builder.WriteString(*v) + } + builder.WriteByte(')') + return builder.String() +} + +// UserSubscriptions is a parsable slice of UserSubscription. +type UserSubscriptions []*UserSubscription diff --git a/backend/ent/usersubscription/usersubscription.go b/backend/ent/usersubscription/usersubscription.go new file mode 100644 index 00000000..f4f7fa82 --- /dev/null +++ b/backend/ent/usersubscription/usersubscription.go @@ -0,0 +1,260 @@ +// Code generated by ent, DO NOT EDIT. + +package usersubscription + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the usersubscription type in the database. + Label = "user_subscription" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldStartsAt holds the string denoting the starts_at field in the database. + FieldStartsAt = "starts_at" + // FieldExpiresAt holds the string denoting the expires_at field in the database. 
+ FieldExpiresAt = "expires_at" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldDailyWindowStart holds the string denoting the daily_window_start field in the database. + FieldDailyWindowStart = "daily_window_start" + // FieldWeeklyWindowStart holds the string denoting the weekly_window_start field in the database. + FieldWeeklyWindowStart = "weekly_window_start" + // FieldMonthlyWindowStart holds the string denoting the monthly_window_start field in the database. + FieldMonthlyWindowStart = "monthly_window_start" + // FieldDailyUsageUsd holds the string denoting the daily_usage_usd field in the database. + FieldDailyUsageUsd = "daily_usage_usd" + // FieldWeeklyUsageUsd holds the string denoting the weekly_usage_usd field in the database. + FieldWeeklyUsageUsd = "weekly_usage_usd" + // FieldMonthlyUsageUsd holds the string denoting the monthly_usage_usd field in the database. + FieldMonthlyUsageUsd = "monthly_usage_usd" + // FieldAssignedBy holds the string denoting the assigned_by field in the database. + FieldAssignedBy = "assigned_by" + // FieldAssignedAt holds the string denoting the assigned_at field in the database. + FieldAssignedAt = "assigned_at" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeAssignedByUser holds the string denoting the assigned_by_user edge name in mutations. + EdgeAssignedByUser = "assigned_by_user" + // Table holds the table name of the usersubscription in the database. + Table = "user_subscriptions" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_subscriptions" + // UserInverseTable is the table name for the User entity. 
+ // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "user_subscriptions" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" + // AssignedByUserTable is the table that holds the assigned_by_user relation/edge. + AssignedByUserTable = "user_subscriptions" + // AssignedByUserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + AssignedByUserInverseTable = "users" + // AssignedByUserColumn is the table column denoting the assigned_by_user relation/edge. + AssignedByUserColumn = "assigned_by" +) + +// Columns holds all SQL columns for usersubscription fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldUserID, + FieldGroupID, + FieldStartsAt, + FieldExpiresAt, + FieldStatus, + FieldDailyWindowStart, + FieldWeeklyWindowStart, + FieldMonthlyWindowStart, + FieldDailyUsageUsd, + FieldWeeklyUsageUsd, + FieldMonthlyUsageUsd, + FieldAssignedBy, + FieldAssignedAt, + FieldNotes, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
+ DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultDailyUsageUsd holds the default value on creation for the "daily_usage_usd" field. + DefaultDailyUsageUsd float64 + // DefaultWeeklyUsageUsd holds the default value on creation for the "weekly_usage_usd" field. + DefaultWeeklyUsageUsd float64 + // DefaultMonthlyUsageUsd holds the default value on creation for the "monthly_usage_usd" field. + DefaultMonthlyUsageUsd float64 + // DefaultAssignedAt holds the default value on creation for the "assigned_at" field. + DefaultAssignedAt func() time.Time +) + +// OrderOption defines the ordering options for the UserSubscription queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByStartsAt orders the results by the starts_at field. 
+func ByStartsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartsAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByDailyWindowStart orders the results by the daily_window_start field. +func ByDailyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyWindowStart, opts...).ToFunc() +} + +// ByWeeklyWindowStart orders the results by the weekly_window_start field. +func ByWeeklyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyWindowStart, opts...).ToFunc() +} + +// ByMonthlyWindowStart orders the results by the monthly_window_start field. +func ByMonthlyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyWindowStart, opts...).ToFunc() +} + +// ByDailyUsageUsd orders the results by the daily_usage_usd field. +func ByDailyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyUsageUsd, opts...).ToFunc() +} + +// ByWeeklyUsageUsd orders the results by the weekly_usage_usd field. +func ByWeeklyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyUsageUsd, opts...).ToFunc() +} + +// ByMonthlyUsageUsd orders the results by the monthly_usage_usd field. +func ByMonthlyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyUsageUsd, opts...).ToFunc() +} + +// ByAssignedBy orders the results by the assigned_by field. 
+func ByAssignedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAssignedBy, opts...).ToFunc() +} + +// ByAssignedAt orders the results by the assigned_at field. +func ByAssignedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAssignedAt, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAssignedByUserField orders the results by assigned_by_user field. 
+func ByAssignedByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAssignedByUserStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newAssignedByUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AssignedByUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AssignedByUserTable, AssignedByUserColumn), + ) +} diff --git a/backend/ent/usersubscription/where.go b/backend/ent/usersubscription/where.go new file mode 100644 index 00000000..f6060d95 --- /dev/null +++ b/backend/ent/usersubscription/where.go @@ -0,0 +1,900 @@ +// Code generated by ent, DO NOT EDIT. + +package usersubscription + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUserID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldGroupID, v)) +} + +// StartsAt applies equality check predicate on the "starts_at" field. 
It's identical to StartsAtEQ. +func StartsAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStartsAt, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldExpiresAt, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStatus, v)) +} + +// DailyWindowStart applies equality check predicate on the "daily_window_start" field. It's identical to DailyWindowStartEQ. +func DailyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyWindowStart, v)) +} + +// WeeklyWindowStart applies equality check predicate on the "weekly_window_start" field. It's identical to WeeklyWindowStartEQ. +func WeeklyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyWindowStart, v)) +} + +// MonthlyWindowStart applies equality check predicate on the "monthly_window_start" field. It's identical to MonthlyWindowStartEQ. +func MonthlyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyWindowStart, v)) +} + +// DailyUsageUsd applies equality check predicate on the "daily_usage_usd" field. It's identical to DailyUsageUsdEQ. +func DailyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyUsageUsd, v)) +} + +// WeeklyUsageUsd applies equality check predicate on the "weekly_usage_usd" field. It's identical to WeeklyUsageUsdEQ. 
+func WeeklyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyUsageUsd, v)) +} + +// MonthlyUsageUsd applies equality check predicate on the "monthly_usage_usd" field. It's identical to MonthlyUsageUsdEQ. +func MonthlyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyUsageUsd, v)) +} + +// AssignedBy applies equality check predicate on the "assigned_by" field. It's identical to AssignedByEQ. +func AssignedBy(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedBy, v)) +} + +// AssignedAt applies equality check predicate on the "assigned_at" field. It's identical to AssignedAtEQ. +func AssignedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedAt, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldUserID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. 
+func GroupIDNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// StartsAtEQ applies the EQ predicate on the "starts_at" field. +func StartsAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStartsAt, v)) +} + +// StartsAtNEQ applies the NEQ predicate on the "starts_at" field. +func StartsAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldStartsAt, v)) +} + +// StartsAtIn applies the In predicate on the "starts_at" field. +func StartsAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldStartsAt, vs...)) +} + +// StartsAtNotIn applies the NotIn predicate on the "starts_at" field. +func StartsAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldStartsAt, vs...)) +} + +// StartsAtGT applies the GT predicate on the "starts_at" field. +func StartsAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldStartsAt, v)) +} + +// StartsAtGTE applies the GTE predicate on the "starts_at" field. +func StartsAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldStartsAt, v)) +} + +// StartsAtLT applies the LT predicate on the "starts_at" field. +func StartsAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldStartsAt, v)) +} + +// StartsAtLTE applies the LTE predicate on the "starts_at" field. +func StartsAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldStartsAt, v)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. 
+func ExpiresAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldExpiresAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. 
+func StatusNEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. 
+func StatusEqualFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContainsFold(FieldStatus, v)) +} + +// DailyWindowStartEQ applies the EQ predicate on the "daily_window_start" field. +func DailyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyWindowStart, v)) +} + +// DailyWindowStartNEQ applies the NEQ predicate on the "daily_window_start" field. +func DailyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldDailyWindowStart, v)) +} + +// DailyWindowStartIn applies the In predicate on the "daily_window_start" field. +func DailyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldDailyWindowStart, vs...)) +} + +// DailyWindowStartNotIn applies the NotIn predicate on the "daily_window_start" field. +func DailyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldDailyWindowStart, vs...)) +} + +// DailyWindowStartGT applies the GT predicate on the "daily_window_start" field. +func DailyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldDailyWindowStart, v)) +} + +// DailyWindowStartGTE applies the GTE predicate on the "daily_window_start" field. +func DailyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldDailyWindowStart, v)) +} + +// DailyWindowStartLT applies the LT predicate on the "daily_window_start" field. 
+func DailyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldDailyWindowStart, v)) +} + +// DailyWindowStartLTE applies the LTE predicate on the "daily_window_start" field. +func DailyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldDailyWindowStart, v)) +} + +// DailyWindowStartIsNil applies the IsNil predicate on the "daily_window_start" field. +func DailyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldDailyWindowStart)) +} + +// DailyWindowStartNotNil applies the NotNil predicate on the "daily_window_start" field. +func DailyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldDailyWindowStart)) +} + +// WeeklyWindowStartEQ applies the EQ predicate on the "weekly_window_start" field. +func WeeklyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartNEQ applies the NEQ predicate on the "weekly_window_start" field. +func WeeklyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartIn applies the In predicate on the "weekly_window_start" field. +func WeeklyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldWeeklyWindowStart, vs...)) +} + +// WeeklyWindowStartNotIn applies the NotIn predicate on the "weekly_window_start" field. +func WeeklyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldWeeklyWindowStart, vs...)) +} + +// WeeklyWindowStartGT applies the GT predicate on the "weekly_window_start" field. 
+func WeeklyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartGTE applies the GTE predicate on the "weekly_window_start" field. +func WeeklyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartLT applies the LT predicate on the "weekly_window_start" field. +func WeeklyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartLTE applies the LTE predicate on the "weekly_window_start" field. +func WeeklyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartIsNil applies the IsNil predicate on the "weekly_window_start" field. +func WeeklyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldWeeklyWindowStart)) +} + +// WeeklyWindowStartNotNil applies the NotNil predicate on the "weekly_window_start" field. +func WeeklyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldWeeklyWindowStart)) +} + +// MonthlyWindowStartEQ applies the EQ predicate on the "monthly_window_start" field. +func MonthlyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartNEQ applies the NEQ predicate on the "monthly_window_start" field. +func MonthlyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartIn applies the In predicate on the "monthly_window_start" field. 
+func MonthlyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldMonthlyWindowStart, vs...)) +} + +// MonthlyWindowStartNotIn applies the NotIn predicate on the "monthly_window_start" field. +func MonthlyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldMonthlyWindowStart, vs...)) +} + +// MonthlyWindowStartGT applies the GT predicate on the "monthly_window_start" field. +func MonthlyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartGTE applies the GTE predicate on the "monthly_window_start" field. +func MonthlyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartLT applies the LT predicate on the "monthly_window_start" field. +func MonthlyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartLTE applies the LTE predicate on the "monthly_window_start" field. +func MonthlyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartIsNil applies the IsNil predicate on the "monthly_window_start" field. +func MonthlyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldMonthlyWindowStart)) +} + +// MonthlyWindowStartNotNil applies the NotNil predicate on the "monthly_window_start" field. +func MonthlyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldMonthlyWindowStart)) +} + +// DailyUsageUsdEQ applies the EQ predicate on the "daily_usage_usd" field. 
+func DailyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdNEQ applies the NEQ predicate on the "daily_usage_usd" field. +func DailyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdIn applies the In predicate on the "daily_usage_usd" field. +func DailyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldDailyUsageUsd, vs...)) +} + +// DailyUsageUsdNotIn applies the NotIn predicate on the "daily_usage_usd" field. +func DailyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldDailyUsageUsd, vs...)) +} + +// DailyUsageUsdGT applies the GT predicate on the "daily_usage_usd" field. +func DailyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdGTE applies the GTE predicate on the "daily_usage_usd" field. +func DailyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdLT applies the LT predicate on the "daily_usage_usd" field. +func DailyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdLTE applies the LTE predicate on the "daily_usage_usd" field. +func DailyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldDailyUsageUsd, v)) +} + +// WeeklyUsageUsdEQ applies the EQ predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdNEQ applies the NEQ predicate on the "weekly_usage_usd" field. 
+func WeeklyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdIn applies the In predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldWeeklyUsageUsd, vs...)) +} + +// WeeklyUsageUsdNotIn applies the NotIn predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldWeeklyUsageUsd, vs...)) +} + +// WeeklyUsageUsdGT applies the GT predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdGTE applies the GTE predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdLT applies the LT predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdLTE applies the LTE predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldWeeklyUsageUsd, v)) +} + +// MonthlyUsageUsdEQ applies the EQ predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdNEQ applies the NEQ predicate on the "monthly_usage_usd" field. 
+func MonthlyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdIn applies the In predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldMonthlyUsageUsd, vs...)) +} + +// MonthlyUsageUsdNotIn applies the NotIn predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldMonthlyUsageUsd, vs...)) +} + +// MonthlyUsageUsdGT applies the GT predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdGTE applies the GTE predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdLT applies the LT predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdLTE applies the LTE predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldMonthlyUsageUsd, v)) +} + +// AssignedByEQ applies the EQ predicate on the "assigned_by" field. +func AssignedByEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedBy, v)) +} + +// AssignedByNEQ applies the NEQ predicate on the "assigned_by" field. 
+func AssignedByNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldAssignedBy, v)) +} + +// AssignedByIn applies the In predicate on the "assigned_by" field. +func AssignedByIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldAssignedBy, vs...)) +} + +// AssignedByNotIn applies the NotIn predicate on the "assigned_by" field. +func AssignedByNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldAssignedBy, vs...)) +} + +// AssignedByIsNil applies the IsNil predicate on the "assigned_by" field. +func AssignedByIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldAssignedBy)) +} + +// AssignedByNotNil applies the NotNil predicate on the "assigned_by" field. +func AssignedByNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldAssignedBy)) +} + +// AssignedAtEQ applies the EQ predicate on the "assigned_at" field. +func AssignedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedAt, v)) +} + +// AssignedAtNEQ applies the NEQ predicate on the "assigned_at" field. +func AssignedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldAssignedAt, v)) +} + +// AssignedAtIn applies the In predicate on the "assigned_at" field. +func AssignedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldAssignedAt, vs...)) +} + +// AssignedAtNotIn applies the NotIn predicate on the "assigned_at" field. +func AssignedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldAssignedAt, vs...)) +} + +// AssignedAtGT applies the GT predicate on the "assigned_at" field. 
+func AssignedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldAssignedAt, v)) +} + +// AssignedAtGTE applies the GTE predicate on the "assigned_at" field. +func AssignedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldAssignedAt, v)) +} + +// AssignedAtLT applies the LT predicate on the "assigned_at" field. +func AssignedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldAssignedAt, v)) +} + +// AssignedAtLTE applies the LTE predicate on the "assigned_at" field. +func AssignedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldAssignedAt, v)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. 
+func NotesLT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. +func NotesNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContainsFold(FieldNotes, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. 
+func HasUser() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAssignedByUser applies the HasEdge predicate on the "assigned_by_user" edge. +func HasAssignedByUser() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AssignedByUserTable, AssignedByUserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAssignedByUserWith applies the HasEdge predicate on the "assigned_by_user" edge with a given conditions (other predicates). 
+func HasAssignedByUserWith(preds ...predicate.User) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newAssignedByUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.NotPredicates(p)) +} diff --git a/backend/ent/usersubscription_create.go b/backend/ent/usersubscription_create.go new file mode 100644 index 00000000..43997f64 --- /dev/null +++ b/backend/ent/usersubscription_create.go @@ -0,0 +1,1578 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionCreate is the builder for creating a UserSubscription entity. +type UserSubscriptionCreate struct { + config + mutation *UserSubscriptionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserSubscriptionCreate) SetCreatedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
+func (_c *UserSubscriptionCreate) SetNillableCreatedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserSubscriptionCreate) SetUpdatedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableUpdatedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *UserSubscriptionCreate) SetUserID(v int64) *UserSubscriptionCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *UserSubscriptionCreate) SetGroupID(v int64) *UserSubscriptionCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetStartsAt sets the "starts_at" field. +func (_c *UserSubscriptionCreate) SetStartsAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetStartsAt(v) + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *UserSubscriptionCreate) SetExpiresAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *UserSubscriptionCreate) SetStatus(v string) *UserSubscriptionCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableStatus(v *string) *UserSubscriptionCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_c *UserSubscriptionCreate) SetDailyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetDailyWindowStart(v) + return _c +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. 
+func (_c *UserSubscriptionCreate) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetDailyWindowStart(*v) + } + return _c +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (_c *UserSubscriptionCreate) SetWeeklyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetWeeklyWindowStart(v) + return _c +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetWeeklyWindowStart(*v) + } + return _c +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (_c *UserSubscriptionCreate) SetMonthlyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetMonthlyWindowStart(v) + return _c +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetMonthlyWindowStart(*v) + } + return _c +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (_c *UserSubscriptionCreate) SetDailyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetDailyUsageUsd(v) + return _c +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetDailyUsageUsd(*v) + } + return _c +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_c *UserSubscriptionCreate) SetWeeklyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetWeeklyUsageUsd(v) + return _c +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. 
+func (_c *UserSubscriptionCreate) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetWeeklyUsageUsd(*v) + } + return _c +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_c *UserSubscriptionCreate) SetMonthlyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetMonthlyUsageUsd(v) + return _c +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetMonthlyUsageUsd(*v) + } + return _c +} + +// SetAssignedBy sets the "assigned_by" field. +func (_c *UserSubscriptionCreate) SetAssignedBy(v int64) *UserSubscriptionCreate { + _c.mutation.SetAssignedBy(v) + return _c +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedBy(v *int64) *UserSubscriptionCreate { + if v != nil { + _c.SetAssignedBy(*v) + } + return _c +} + +// SetAssignedAt sets the "assigned_at" field. +func (_c *UserSubscriptionCreate) SetAssignedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetAssignedAt(v) + return _c +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetAssignedAt(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *UserSubscriptionCreate) SetNotes(v string) *UserSubscriptionCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableNotes(v *string) *UserSubscriptionCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. 
+func (_c *UserSubscriptionCreate) SetUser(v *User) *UserSubscriptionCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *UserSubscriptionCreate) SetGroup(v *Group) *UserSubscriptionCreate { + return _c.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. +func (_c *UserSubscriptionCreate) SetAssignedByUserID(id int64) *UserSubscriptionCreate { + _c.mutation.SetAssignedByUserID(id) + return _c +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedByUserID(id *int64) *UserSubscriptionCreate { + if id != nil { + _c = _c.SetAssignedByUserID(*id) + } + return _c +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. +func (_c *UserSubscriptionCreate) SetAssignedByUser(v *User) *UserSubscriptionCreate { + return _c.SetAssignedByUserID(v.ID) +} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_c *UserSubscriptionCreate) Mutation() *UserSubscriptionMutation { + return _c.mutation +} + +// Save creates the UserSubscription in the database. +func (_c *UserSubscriptionCreate) Save(ctx context.Context) (*UserSubscription, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserSubscriptionCreate) SaveX(ctx context.Context) *UserSubscription { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserSubscriptionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_c *UserSubscriptionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserSubscriptionCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := usersubscription.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := usersubscription.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := usersubscription.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.DailyUsageUsd(); !ok { + v := usersubscription.DefaultDailyUsageUsd + _c.mutation.SetDailyUsageUsd(v) + } + if _, ok := _c.mutation.WeeklyUsageUsd(); !ok { + v := usersubscription.DefaultWeeklyUsageUsd + _c.mutation.SetWeeklyUsageUsd(v) + } + if _, ok := _c.mutation.MonthlyUsageUsd(); !ok { + v := usersubscription.DefaultMonthlyUsageUsd + _c.mutation.SetMonthlyUsageUsd(v) + } + if _, ok := _c.mutation.AssignedAt(); !ok { + v := usersubscription.DefaultAssignedAt() + _c.mutation.SetAssignedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UserSubscriptionCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserSubscription.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserSubscription.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserSubscription.user_id"`)} + } + if _, ok := _c.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "UserSubscription.group_id"`)} + } + if _, ok := _c.mutation.StartsAt(); !ok { + return &ValidationError{Name: "starts_at", err: errors.New(`ent: missing required field "UserSubscription.starts_at"`)} + } + if _, ok := _c.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "UserSubscription.expires_at"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "UserSubscription.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _, ok := _c.mutation.DailyUsageUsd(); !ok { + return &ValidationError{Name: "daily_usage_usd", err: errors.New(`ent: missing required field "UserSubscription.daily_usage_usd"`)} + } + if _, ok := _c.mutation.WeeklyUsageUsd(); !ok { + return &ValidationError{Name: "weekly_usage_usd", err: errors.New(`ent: missing required field "UserSubscription.weekly_usage_usd"`)} + } + if _, ok := _c.mutation.MonthlyUsageUsd(); !ok { + return &ValidationError{Name: "monthly_usage_usd", err: errors.New(`ent: missing required field 
"UserSubscription.monthly_usage_usd"`)} + } + if _, ok := _c.mutation.AssignedAt(); !ok { + return &ValidationError{Name: "assigned_at", err: errors.New(`ent: missing required field "UserSubscription.assigned_at"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserSubscription.user"`)} + } + if len(_c.mutation.GroupIDs()) == 0 { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "UserSubscription.group"`)} + } + return nil +} + +func (_c *UserSubscriptionCreate) sqlSave(ctx context.Context) (*UserSubscription, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserSubscriptionCreate) createSpec() (*UserSubscription, *sqlgraph.CreateSpec) { + var ( + _node = &UserSubscription{config: _c.config} + _spec = sqlgraph.NewCreateSpec(usersubscription.Table, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(usersubscription.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + _node.StartsAt = value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = value + } + if value, ok := 
_c.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + _node.DailyWindowStart = &value + } + if value, ok := _c.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + _node.WeeklyWindowStart = &value + } + if value, ok := _c.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + _node.MonthlyWindowStart = &value + } + if value, ok := _c.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + _node.DailyUsageUsd = value + } + if value, ok := _c.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + _node.WeeklyUsageUsd = value + } + if value, ok := _c.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + _node.MonthlyUsageUsd = value + } + if value, ok := _c.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, field.TypeTime, value) + _node.AssignedAt = value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + 
edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AssignedBy = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserSubscription.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserSubscriptionUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserSubscriptionCreate) OnConflict(opts ...sql.ConflictOption) *UserSubscriptionUpsertOne { + _c.conflict = opts + return &UserSubscriptionUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (_c *UserSubscriptionCreate) OnConflictColumns(columns ...string) *UserSubscriptionUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserSubscriptionUpsertOne{ + create: _c, + } +} + +type ( + // UserSubscriptionUpsertOne is the builder for "upsert"-ing + // one UserSubscription node. + UserSubscriptionUpsertOne struct { + create *UserSubscriptionCreate + } + + // UserSubscriptionUpsert is the "OnConflict" setter. + UserSubscriptionUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserSubscriptionUpsert) SetUpdatedAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateUpdatedAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldUpdatedAt) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UserSubscriptionUpsert) SetUserID(v int64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateUserID() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldUserID) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *UserSubscriptionUpsert) SetGroupID(v int64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateGroupID() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldGroupID) + return u +} + +// SetStartsAt sets the "starts_at" field. 
+func (u *UserSubscriptionUpsert) SetStartsAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldStartsAt, v) + return u +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateStartsAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldStartsAt) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsert) SetExpiresAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateExpiresAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldExpiresAt) + return u +} + +// SetStatus sets the "status" field. +func (u *UserSubscriptionUpsert) SetStatus(v string) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateStatus() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldStatus) + return u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsert) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldDailyWindowStart, v) + return u +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateDailyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldDailyWindowStart) + return u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (u *UserSubscriptionUpsert) ClearDailyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldDailyWindowStart) + return u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. 
+func (u *UserSubscriptionUpsert) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldWeeklyWindowStart, v) + return u +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateWeeklyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldWeeklyWindowStart) + return u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (u *UserSubscriptionUpsert) ClearWeeklyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldWeeklyWindowStart) + return u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (u *UserSubscriptionUpsert) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldMonthlyWindowStart, v) + return u +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateMonthlyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldMonthlyWindowStart) + return u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsert) ClearMonthlyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldMonthlyWindowStart) + return u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (u *UserSubscriptionUpsert) SetDailyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldDailyUsageUsd, v) + return u +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateDailyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldDailyUsageUsd) + return u +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. 
+func (u *UserSubscriptionUpsert) AddDailyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldDailyUsageUsd, v) + return u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsert) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldWeeklyUsageUsd, v) + return u +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateWeeklyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldWeeklyUsageUsd) + return u +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsert) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldWeeklyUsageUsd, v) + return u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsert) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldMonthlyUsageUsd, v) + return u +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateMonthlyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldMonthlyUsageUsd) + return u +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsert) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldMonthlyUsageUsd, v) + return u +} + +// SetAssignedBy sets the "assigned_by" field. +func (u *UserSubscriptionUpsert) SetAssignedBy(v int64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldAssignedBy, v) + return u +} + +// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsert) UpdateAssignedBy() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldAssignedBy) + return u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (u *UserSubscriptionUpsert) ClearAssignedBy() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldAssignedBy) + return u +} + +// SetAssignedAt sets the "assigned_at" field. +func (u *UserSubscriptionUpsert) SetAssignedAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldAssignedAt, v) + return u +} + +// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateAssignedAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldAssignedAt) + return u +} + +// SetNotes sets the "notes" field. +func (u *UserSubscriptionUpsert) SetNotes(v string) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateNotes() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *UserSubscriptionUpsert) ClearNotes() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldNotes) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *UserSubscriptionUpsertOne) UpdateNewValues() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(usersubscription.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserSubscriptionUpsertOne) Ignore() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserSubscriptionUpsertOne) DoNothing() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserSubscriptionCreate.OnConflict +// documentation for more info. +func (u *UserSubscriptionUpsertOne) Update(set func(*UserSubscriptionUpsert)) *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserSubscriptionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserSubscriptionUpsertOne) SetUpdatedAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateUpdatedAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUserID sets the "user_id" field. 
+func (u *UserSubscriptionUpsertOne) SetUserID(v int64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateUserID() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserSubscriptionUpsertOne) SetGroupID(v int64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateGroupID() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateGroupID() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *UserSubscriptionUpsertOne) SetStartsAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateStartsAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStartsAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsertOne) SetExpiresAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateExpiresAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateExpiresAt() + }) +} + +// SetStatus sets the "status" field. 
+func (u *UserSubscriptionUpsertOne) SetStatus(v string) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateStatus() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStatus() + }) +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsertOne) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyWindowStart(v) + }) +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateDailyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyWindowStart() + }) +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (u *UserSubscriptionUpsertOne) ClearDailyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDailyWindowStart() + }) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (u *UserSubscriptionUpsertOne) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyWindowStart(v) + }) +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateWeeklyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyWindowStart() + }) +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. 
+func (u *UserSubscriptionUpsertOne) ClearWeeklyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearWeeklyWindowStart() + }) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (u *UserSubscriptionUpsertOne) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyWindowStart(v) + }) +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateMonthlyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyWindowStart() + }) +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsertOne) ClearMonthlyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearMonthlyWindowStart() + }) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertOne) SetDailyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyUsageUsd(v) + }) +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddDailyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddDailyUsageUsd(v) + }) +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateDailyUsageUsd() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyUsageUsd() + }) +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. 
+func (u *UserSubscriptionUpsertOne) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyUsageUsd(v) + }) +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddWeeklyUsageUsd(v) + }) +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateWeeklyUsageUsd() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyUsageUsd() + }) +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyUsageUsd(v) + }) +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddMonthlyUsageUsd(v) + }) +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateMonthlyUsageUsd() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyUsageUsd() + }) +} + +// SetAssignedBy sets the "assigned_by" field. +func (u *UserSubscriptionUpsertOne) SetAssignedBy(v int64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetAssignedBy(v) + }) +} + +// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsertOne) UpdateAssignedBy() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateAssignedBy() + }) +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (u *UserSubscriptionUpsertOne) ClearAssignedBy() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearAssignedBy() + }) +} + +// SetAssignedAt sets the "assigned_at" field. +func (u *UserSubscriptionUpsertOne) SetAssignedAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetAssignedAt(v) + }) +} + +// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateAssignedAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateAssignedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *UserSubscriptionUpsertOne) SetNotes(v string) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateNotes() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *UserSubscriptionUpsertOne) ClearNotes() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearNotes() + }) +} + +// Exec executes the query. +func (u *UserSubscriptionUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserSubscriptionCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *UserSubscriptionUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserSubscriptionUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserSubscriptionUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserSubscriptionCreateBulk is the builder for creating many UserSubscription entities in bulk. +type UserSubscriptionCreateBulk struct { + config + err error + builders []*UserSubscriptionCreate + conflict []sql.ConflictOption +} + +// Save creates the UserSubscription entities in the database. +func (_c *UserSubscriptionCreateBulk) Save(ctx context.Context) ([]*UserSubscription, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserSubscription, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserSubscriptionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *UserSubscriptionCreateBulk) SaveX(ctx context.Context) []*UserSubscription { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserSubscriptionCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserSubscriptionCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserSubscription.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserSubscriptionUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserSubscriptionCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserSubscriptionUpsertBulk { + _c.conflict = opts + return &UserSubscriptionUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserSubscriptionCreateBulk) OnConflictColumns(columns ...string) *UserSubscriptionUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserSubscriptionUpsertBulk{ + create: _c, + } +} + +// UserSubscriptionUpsertBulk is the builder for "upsert"-ing +// a bulk of UserSubscription nodes. +type UserSubscriptionUpsertBulk struct { + create *UserSubscriptionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserSubscriptionUpsertBulk) UpdateNewValues() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(usersubscription.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserSubscriptionUpsertBulk) Ignore() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserSubscriptionUpsertBulk) DoNothing() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. 
See the UserSubscriptionCreateBulk.OnConflict +// documentation for more info. +func (u *UserSubscriptionUpsertBulk) Update(set func(*UserSubscriptionUpsert)) *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserSubscriptionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserSubscriptionUpsertBulk) SetUpdatedAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateUpdatedAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *UserSubscriptionUpsertBulk) SetUserID(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateUserID() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserSubscriptionUpsertBulk) SetGroupID(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateGroupID() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateGroupID() + }) +} + +// SetStartsAt sets the "starts_at" field. 
+func (u *UserSubscriptionUpsertBulk) SetStartsAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateStartsAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStartsAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsertBulk) SetExpiresAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateExpiresAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateExpiresAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UserSubscriptionUpsertBulk) SetStatus(v string) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateStatus() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStatus() + }) +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsertBulk) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyWindowStart(v) + }) +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateDailyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyWindowStart() + }) +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. 
+func (u *UserSubscriptionUpsertBulk) ClearDailyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDailyWindowStart() + }) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (u *UserSubscriptionUpsertBulk) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyWindowStart(v) + }) +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateWeeklyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyWindowStart() + }) +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (u *UserSubscriptionUpsertBulk) ClearWeeklyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearWeeklyWindowStart() + }) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (u *UserSubscriptionUpsertBulk) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyWindowStart(v) + }) +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateMonthlyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyWindowStart() + }) +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsertBulk) ClearMonthlyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearMonthlyWindowStart() + }) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. 
+func (u *UserSubscriptionUpsertBulk) SetDailyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyUsageUsd(v) + }) +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) AddDailyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddDailyUsageUsd(v) + }) +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateDailyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyUsageUsd() + }) +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyUsageUsd(v) + }) +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddWeeklyUsageUsd(v) + }) +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateWeeklyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyUsageUsd() + }) +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyUsageUsd(v) + }) +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. 
+func (u *UserSubscriptionUpsertBulk) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddMonthlyUsageUsd(v) + }) +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateMonthlyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyUsageUsd() + }) +} + +// SetAssignedBy sets the "assigned_by" field. +func (u *UserSubscriptionUpsertBulk) SetAssignedBy(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetAssignedBy(v) + }) +} + +// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateAssignedBy() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateAssignedBy() + }) +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (u *UserSubscriptionUpsertBulk) ClearAssignedBy() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearAssignedBy() + }) +} + +// SetAssignedAt sets the "assigned_at" field. +func (u *UserSubscriptionUpsertBulk) SetAssignedAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetAssignedAt(v) + }) +} + +// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateAssignedAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateAssignedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *UserSubscriptionUpsertBulk) SetNotes(v string) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsertBulk) UpdateNotes() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *UserSubscriptionUpsertBulk) ClearNotes() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearNotes() + }) +} + +// Exec executes the query. +func (u *UserSubscriptionUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserSubscriptionCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserSubscriptionCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserSubscriptionUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usersubscription_delete.go b/backend/ent/usersubscription_delete.go new file mode 100644 index 00000000..02096763 --- /dev/null +++ b/backend/ent/usersubscription_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionDelete is the builder for deleting a UserSubscription entity. +type UserSubscriptionDelete struct { + config + hooks []Hook + mutation *UserSubscriptionMutation +} + +// Where appends a list predicates to the UserSubscriptionDelete builder. +func (_d *UserSubscriptionDelete) Where(ps ...predicate.UserSubscription) *UserSubscriptionDelete { + _d.mutation.Where(ps...) 
+ return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *UserSubscriptionDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *UserSubscriptionDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *UserSubscriptionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(usersubscription.Table, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// UserSubscriptionDeleteOne is the builder for deleting a single UserSubscription entity. +type UserSubscriptionDeleteOne struct { + _d *UserSubscriptionDelete +} + +// Where appends a list predicates to the UserSubscriptionDelete builder. +func (_d *UserSubscriptionDeleteOne) Where(ps ...predicate.UserSubscription) *UserSubscriptionDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *UserSubscriptionDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{usersubscription.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *UserSubscriptionDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/usersubscription_query.go b/backend/ent/usersubscription_query.go new file mode 100644 index 00000000..034f29b4 --- /dev/null +++ b/backend/ent/usersubscription_query.go @@ -0,0 +1,758 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionQuery is the builder for querying UserSubscription entities. +type UserSubscriptionQuery struct { + config + ctx *QueryContext + order []usersubscription.OrderOption + inters []Interceptor + predicates []predicate.UserSubscription + withUser *UserQuery + withGroup *GroupQuery + withAssignedByUser *UserQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserSubscriptionQuery builder. +func (_q *UserSubscriptionQuery) Where(ps ...predicate.UserSubscription) *UserSubscriptionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserSubscriptionQuery) Limit(limit int) *UserSubscriptionQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserSubscriptionQuery) Offset(offset int) *UserSubscriptionQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
+func (_q *UserSubscriptionQuery) Unique(unique bool) *UserSubscriptionQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserSubscriptionQuery) Order(o ...usersubscription.OrderOption) *UserSubscriptionQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UserSubscriptionQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.UserTable, usersubscription.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *UserSubscriptionQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.GroupTable, usersubscription.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAssignedByUser chains the current query on the "assigned_by_user" edge. 
+func (_q *UserSubscriptionQuery) QueryAssignedByUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.AssignedByUserTable, usersubscription.AssignedByUserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserSubscription entity from the query. +// Returns a *NotFoundError when no UserSubscription was found. +func (_q *UserSubscriptionQuery) First(ctx context.Context) (*UserSubscription, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{usersubscription.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserSubscriptionQuery) FirstX(ctx context.Context) *UserSubscription { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserSubscription ID from the query. +// Returns a *NotFoundError when no UserSubscription ID was found. +func (_q *UserSubscriptionQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{usersubscription.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (_q *UserSubscriptionQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserSubscription entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserSubscription entity is found. +// Returns a *NotFoundError when no UserSubscription entities are found. +func (_q *UserSubscriptionQuery) Only(ctx context.Context) (*UserSubscription, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{usersubscription.Label} + default: + return nil, &NotSingularError{usersubscription.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserSubscriptionQuery) OnlyX(ctx context.Context) *UserSubscription { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserSubscription ID in the query. +// Returns a *NotSingularError when more than one UserSubscription ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserSubscriptionQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{usersubscription.Label} + default: + err = &NotSingularError{usersubscription.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserSubscriptionQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserSubscriptions. 
+func (_q *UserSubscriptionQuery) All(ctx context.Context) ([]*UserSubscription, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserSubscription, *UserSubscriptionQuery]() + return withInterceptors[[]*UserSubscription](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserSubscriptionQuery) AllX(ctx context.Context) []*UserSubscription { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserSubscription IDs. +func (_q *UserSubscriptionQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(usersubscription.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserSubscriptionQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserSubscriptionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserSubscriptionQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserSubscriptionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *UserSubscriptionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserSubscriptionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserSubscriptionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserSubscriptionQuery) Clone() *UserSubscriptionQuery { + if _q == nil { + return nil + } + return &UserSubscriptionQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]usersubscription.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserSubscription{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + withAssignedByUser: _q.withAssignedByUser.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserSubscriptionQuery) WithUser(opts ...func(*UserQuery)) *UserSubscriptionQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *UserSubscriptionQuery) WithGroup(opts ...func(*GroupQuery)) *UserSubscriptionQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// WithAssignedByUser tells the query-builder to eager-load the nodes that are connected to +// the "assigned_by_user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserSubscriptionQuery) WithAssignedByUser(opts ...func(*UserQuery)) *UserSubscriptionQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAssignedByUser = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserSubscription.Query(). +// GroupBy(usersubscription.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserSubscriptionQuery) GroupBy(field string, fields ...string) *UserSubscriptionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserSubscriptionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = usersubscription.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UserSubscription.Query(). +// Select(usersubscription.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UserSubscriptionQuery) Select(fields ...string) *UserSubscriptionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &UserSubscriptionSelect{UserSubscriptionQuery: _q} + sbuild.label = usersubscription.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSubscriptionSelect configured with the given aggregations. +func (_q *UserSubscriptionQuery) Aggregate(fns ...AggregateFunc) *UserSubscriptionSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserSubscriptionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !usersubscription.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserSubscriptionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserSubscription, error) { + var ( + nodes = []*UserSubscription{} + _spec = _q.querySpec() + loadedTypes = [3]bool{ + _q.withUser != nil, + _q.withGroup != nil, + _q.withAssignedByUser != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserSubscription).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserSubscription{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserSubscription, e *User) { 
n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *UserSubscription, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := _q.withAssignedByUser; query != nil { + if err := _q.loadAssignedByUser(ctx, query, nodes, nil, + func(n *UserSubscription, e *User) { n.Edges.AssignedByUser = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserSubscriptionQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserSubscriptionQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" 
returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserSubscriptionQuery) loadAssignedByUser(ctx context.Context, query *UserQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i := range nodes { + if nodes[i].AssignedBy == nil { + continue + } + fk := *nodes[i].AssignedBy + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "assigned_by" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *UserSubscriptionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserSubscriptionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usersubscription.FieldID) + for i := range fields { + if fields[i] != usersubscription.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + 
_spec.Node.AddColumnOnce(usersubscription.FieldUserID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(usersubscription.FieldGroupID) + } + if _q.withAssignedByUser != nil { + _spec.Node.AddColumnOnce(usersubscription.FieldAssignedBy) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(usersubscription.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = usersubscription.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserSubscriptionGroupBy is the group-by builder for UserSubscription entities. +type UserSubscriptionGroupBy struct { + selector + build *UserSubscriptionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserSubscriptionGroupBy) Aggregate(fns ...AggregateFunc) *UserSubscriptionGroupBy { + _g.fns = append(_g.fns, fns...) 
+ return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserSubscriptionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserSubscriptionQuery, *UserSubscriptionGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserSubscriptionGroupBy) sqlScan(ctx context.Context, root *UserSubscriptionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSubscriptionSelect is the builder for selecting fields of UserSubscription entities. +type UserSubscriptionSelect struct { + *UserSubscriptionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserSubscriptionSelect) Aggregate(fns ...AggregateFunc) *UserSubscriptionSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *UserSubscriptionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserSubscriptionQuery, *UserSubscriptionSelect](ctx, _s.UserSubscriptionQuery, _s, _s.inters, v) +} + +func (_s *UserSubscriptionSelect) sqlScan(ctx context.Context, root *UserSubscriptionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/usersubscription_update.go b/backend/ent/usersubscription_update.go new file mode 100644 index 00000000..c0df17ff --- /dev/null +++ b/backend/ent/usersubscription_update.go @@ -0,0 +1,1122 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionUpdate is the builder for updating UserSubscription entities. +type UserSubscriptionUpdate struct { + config + hooks []Hook + mutation *UserSubscriptionMutation +} + +// Where appends a list predicates to the UserSubscriptionUpdate builder. +func (_u *UserSubscriptionUpdate) Where(ps ...predicate.UserSubscription) *UserSubscriptionUpdate { + _u.mutation.Where(ps...) 
+ return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserSubscriptionUpdate) SetUpdatedAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserSubscriptionUpdate) SetUserID(v int64) *UserSubscriptionUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableUserID(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserSubscriptionUpdate) SetGroupID(v int64) *UserSubscriptionUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableGroupID(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *UserSubscriptionUpdate) SetStartsAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableStartsAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *UserSubscriptionUpdate) SetExpiresAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableExpiresAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// SetStatus sets the "status" field. 
+func (_u *UserSubscriptionUpdate) SetStatus(v string) *UserSubscriptionUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableStatus(v *string) *UserSubscriptionUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_u *UserSubscriptionUpdate) SetDailyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetDailyWindowStart(v) + return _u +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetDailyWindowStart(*v) + } + return _u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (_u *UserSubscriptionUpdate) ClearDailyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearDailyWindowStart() + return _u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (_u *UserSubscriptionUpdate) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetWeeklyWindowStart(v) + return _u +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetWeeklyWindowStart(*v) + } + return _u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (_u *UserSubscriptionUpdate) ClearWeeklyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearWeeklyWindowStart() + return _u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. 
+func (_u *UserSubscriptionUpdate) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetMonthlyWindowStart(v) + return _u +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetMonthlyWindowStart(*v) + } + return _u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (_u *UserSubscriptionUpdate) ClearMonthlyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearMonthlyWindowStart() + return _u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetDailyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetDailyUsageUsd() + _u.mutation.SetDailyUsageUsd(v) + return _u +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetDailyUsageUsd(*v) + } + return _u +} + +// AddDailyUsageUsd adds value to the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdate) AddDailyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddDailyUsageUsd(v) + return _u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetWeeklyUsageUsd() + _u.mutation.SetWeeklyUsageUsd(v) + return _u +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetWeeklyUsageUsd(*v) + } + return _u +} + +// AddWeeklyUsageUsd adds value to the "weekly_usage_usd" field. 
+func (_u *UserSubscriptionUpdate) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddWeeklyUsageUsd(v) + return _u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetMonthlyUsageUsd() + _u.mutation.SetMonthlyUsageUsd(v) + return _u +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetMonthlyUsageUsd(*v) + } + return _u +} + +// AddMonthlyUsageUsd adds value to the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdate) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddMonthlyUsageUsd(v) + return _u +} + +// SetAssignedBy sets the "assigned_by" field. +func (_u *UserSubscriptionUpdate) SetAssignedBy(v int64) *UserSubscriptionUpdate { + _u.mutation.SetAssignedBy(v) + return _u +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedBy(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetAssignedBy(*v) + } + return _u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (_u *UserSubscriptionUpdate) ClearAssignedBy() *UserSubscriptionUpdate { + _u.mutation.ClearAssignedBy() + return _u +} + +// SetAssignedAt sets the "assigned_at" field. +func (_u *UserSubscriptionUpdate) SetAssignedAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetAssignedAt(v) + return _u +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetAssignedAt(*v) + } + return _u +} + +// SetNotes sets the "notes" field. 
+func (_u *UserSubscriptionUpdate) SetNotes(v string) *UserSubscriptionUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableNotes(v *string) *UserSubscriptionUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *UserSubscriptionUpdate) ClearNotes() *UserSubscriptionUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserSubscriptionUpdate) SetUser(v *User) *UserSubscriptionUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdate) SetGroup(v *Group) *UserSubscriptionUpdate { + return _u.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. +func (_u *UserSubscriptionUpdate) SetAssignedByUserID(id int64) *UserSubscriptionUpdate { + _u.mutation.SetAssignedByUserID(id) + return _u +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedByUserID(id *int64) *UserSubscriptionUpdate { + if id != nil { + _u = _u.SetAssignedByUserID(*id) + } + return _u +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdate) SetAssignedByUser(v *User) *UserSubscriptionUpdate { + return _u.SetAssignedByUserID(v.ID) +} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_u *UserSubscriptionUpdate) Mutation() *UserSubscriptionMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserSubscriptionUpdate) ClearUser() *UserSubscriptionUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. 
+func (_u *UserSubscriptionUpdate) ClearGroup() *UserSubscriptionUpdate { + _u.mutation.ClearGroup() + return _u +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdate) ClearAssignedByUser() *UserSubscriptionUpdate { + _u.mutation.ClearAssignedByUser() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserSubscriptionUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserSubscriptionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserSubscriptionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserSubscriptionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserSubscriptionUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usersubscription.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserSubscriptionUpdate) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.group"`) + } + return nil +} + +func (_u *UserSubscriptionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + } + if _u.mutation.DailyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldDailyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + } + if 
_u.mutation.WeeklyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldWeeklyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + } + if _u.mutation.MonthlyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldMonthlyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(usersubscription.FieldNotes, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) 
> 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedByUserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, 
field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usersubscription.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserSubscriptionUpdateOne is the builder for updating a single UserSubscription entity. +type UserSubscriptionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserSubscriptionMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserSubscriptionUpdateOne) SetUpdatedAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserSubscriptionUpdateOne) SetUserID(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableUserID(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserSubscriptionUpdateOne) SetGroupID(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableGroupID(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *UserSubscriptionUpdateOne) SetStartsAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. 
+func (_u *UserSubscriptionUpdateOne) SetNillableStartsAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *UserSubscriptionUpdateOne) SetExpiresAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableExpiresAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserSubscriptionUpdateOne) SetStatus(v string) *UserSubscriptionUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableStatus(v *string) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_u *UserSubscriptionUpdateOne) SetDailyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetDailyWindowStart(v) + return _u +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetDailyWindowStart(*v) + } + return _u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearDailyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearDailyWindowStart() + return _u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. 
+func (_u *UserSubscriptionUpdateOne) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetWeeklyWindowStart(v) + return _u +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetWeeklyWindowStart(*v) + } + return _u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearWeeklyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearWeeklyWindowStart() + return _u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (_u *UserSubscriptionUpdateOne) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetMonthlyWindowStart(v) + return _u +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetMonthlyWindowStart(*v) + } + return _u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearMonthlyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearMonthlyWindowStart() + return _u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetDailyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetDailyUsageUsd() + _u.mutation.SetDailyUsageUsd(v) + return _u +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetDailyUsageUsd(*v) + } + return _u +} + +// AddDailyUsageUsd adds value to the "daily_usage_usd" field. 
+func (_u *UserSubscriptionUpdateOne) AddDailyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddDailyUsageUsd(v) + return _u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetWeeklyUsageUsd() + _u.mutation.SetWeeklyUsageUsd(v) + return _u +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetWeeklyUsageUsd(*v) + } + return _u +} + +// AddWeeklyUsageUsd adds value to the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddWeeklyUsageUsd(v) + return _u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetMonthlyUsageUsd() + _u.mutation.SetMonthlyUsageUsd(v) + return _u +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetMonthlyUsageUsd(*v) + } + return _u +} + +// AddMonthlyUsageUsd adds value to the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddMonthlyUsageUsd(v) + return _u +} + +// SetAssignedBy sets the "assigned_by" field. +func (_u *UserSubscriptionUpdateOne) SetAssignedBy(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedBy(v) + return _u +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. 
+func (_u *UserSubscriptionUpdateOne) SetNillableAssignedBy(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetAssignedBy(*v) + } + return _u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (_u *UserSubscriptionUpdateOne) ClearAssignedBy() *UserSubscriptionUpdateOne { + _u.mutation.ClearAssignedBy() + return _u +} + +// SetAssignedAt sets the "assigned_at" field. +func (_u *UserSubscriptionUpdateOne) SetAssignedAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedAt(v) + return _u +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableAssignedAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetAssignedAt(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserSubscriptionUpdateOne) SetNotes(v string) *UserSubscriptionUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableNotes(v *string) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *UserSubscriptionUpdateOne) ClearNotes() *UserSubscriptionUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) SetUser(v *User) *UserSubscriptionUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdateOne) SetGroup(v *Group) *UserSubscriptionUpdateOne { + return _u.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. 
+func (_u *UserSubscriptionUpdateOne) SetAssignedByUserID(id int64) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedByUserID(id) + return _u +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableAssignedByUserID(id *int64) *UserSubscriptionUpdateOne { + if id != nil { + _u = _u.SetAssignedByUserID(*id) + } + return _u +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) SetAssignedByUser(v *User) *UserSubscriptionUpdateOne { + return _u.SetAssignedByUserID(v.ID) +} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_u *UserSubscriptionUpdateOne) Mutation() *UserSubscriptionMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) ClearUser() *UserSubscriptionUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdateOne) ClearGroup() *UserSubscriptionUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) ClearAssignedByUser() *UserSubscriptionUpdateOne { + _u.mutation.ClearAssignedByUser() + return _u +} + +// Where appends a list predicates to the UserSubscriptionUpdate builder. +func (_u *UserSubscriptionUpdateOne) Where(ps ...predicate.UserSubscription) *UserSubscriptionUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserSubscriptionUpdateOne) Select(field string, fields ...string) *UserSubscriptionUpdateOne { + _u.fields = append([]string{field}, fields...) 
+ return _u +} + +// Save executes the query and returns the updated UserSubscription entity. +func (_u *UserSubscriptionUpdateOne) Save(ctx context.Context) (*UserSubscription, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserSubscriptionUpdateOne) SaveX(ctx context.Context) *UserSubscription { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserSubscriptionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserSubscriptionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserSubscriptionUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := usersubscription.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserSubscriptionUpdateOne) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.group"`) + } + return nil +} + +func (_u *UserSubscriptionUpdateOne) sqlSave(ctx context.Context) (_node *UserSubscription, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserSubscription.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usersubscription.FieldID) + for _, f := range fields { + if !usersubscription.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != usersubscription.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + } + if value, ok := 
_u.mutation.ExpiresAt(); ok { + _spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + } + if _u.mutation.DailyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldDailyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + } + if _u.mutation.WeeklyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldWeeklyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + } + if _u.mutation.MonthlyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldMonthlyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, field.TypeTime, value) + } 
+ if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(usersubscription.FieldNotes, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedByUserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: 
true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserSubscription{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usersubscription.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/go.mod b/backend/go.mod index 084b58cd..149bb64a 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -5,6 +5,7 @@ go 1.24.0 toolchain go1.24.11 require ( + entgo.io/ent v0.14.5 github.com/gin-gonic/gin v1.9.1 github.com/golang-jwt/jwt/v5 v5.2.0 github.com/google/uuid v1.6.0 @@ -23,17 +24,17 @@ require ( golang.org/x/net v0.47.0 golang.org/x/term v0.37.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/datatypes v1.2.0 - gorm.io/driver/postgres v1.5.4 - gorm.io/gorm v1.25.5 ) require ( + ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect dario.cat/mergo v1.0.2 // indirect - filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio 
v0.6.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/andybalholm/brotli v1.2.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/bytedance/sonic v1.9.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -58,22 +59,19 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.14.0 // indirect - github.com/go-sql-driver/mysql v1.9.0 // indirect github.com/goccy/go-json v0.10.2 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/subcommands v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.1 // indirect github.com/icholy/digest v1.1.0 // indirect - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.4 // indirect - github.com/jackc/puddle/v2 v2.2.2 // indirect - github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jinzhu/now v1.1.5 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.1 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect @@ -82,7 +80,9 @@ require ( github.com/magiconair/properties v1.8.10 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mdelapenya/tlscert v0.2.0 
// indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.1.0 // indirect @@ -94,6 +94,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect @@ -103,6 +104,7 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.56.0 // indirect github.com/refraction-networking/utls v1.8.1 // indirect + github.com/rivo/uniseg v0.2.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shirou/gopsutil/v4 v4.25.6 // indirect @@ -111,6 +113,7 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/testcontainers/testcontainers-go v0.40.0 // indirect @@ -121,6 +124,8 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.37.0 // indirect @@ -137,8 +142,8 @@ require ( golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/tools v0.38.0 // indirect + 
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/grpc v1.75.1 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gorm.io/driver/mysql v1.5.2 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 2afa97d7..3552989e 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,15 +1,25 @@ +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc= +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +entgo.io/ent v0.14.5 h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4= +entgo.io/ent v0.14.5/go.mod h1:zTzLmWtPvGpmSwtkaayM2cm5m819NdM7z7tYPq3vN0U= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= 
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -34,6 +44,7 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -73,6 +84,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/inflect 
v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -81,17 +94,12 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo= -github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= -github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= -github.com/golang-sql/sqlexp v0.1.0/go.mod 
h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -109,10 +117,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLW github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= +github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4= github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y= github.com/imroc/req/v3 v3.56.0 h1:t6YdqqerYBXhZ9+VjqsQs5wlKxdUNEvsgBhxWc1AEEo= github.com/imroc/req/v3 v3.56.0/go.mod h1:cUZSooE8hhzFNOrAbdxuemXDQxFXLQTnu3066jr7ZGk= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -121,10 +133,6 @@ github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod 
h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= -github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= @@ -136,6 +144,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -149,12 +159,15 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-runewidth v0.0.9/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= -github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= -github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -180,6 +193,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec 
v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -203,12 +218,17 @@ github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4Vi github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkrMJI0pRUOCAo= github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -221,6 +241,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= @@ -269,6 +291,10 @@ github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZ github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= github.com/zeromicro/go-zero v1.9.4 h1:aRLFoISqAYijABtkbliQC5SsI5TbizJpQvoHc9xup8k= github.com/zeromicro/go-zero v1.9.4/go.mod h1:a17JOTch25SWxBcUgJZYps60hygK3pIYdw7nGwlcS38= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -329,6 +355,10 @@ golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= 
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= @@ -347,19 +377,6 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/datatypes v1.2.0 h1:5YT+eokWdIxhJgWHdrb2zYUimyk0+TaFth+7a0ybzco= -gorm.io/datatypes v1.2.0/go.mod h1:o1dh0ZvjIjhH/bngTpypG6lVRJ5chTBxE09FH/71k04= -gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= -gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= -gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= -gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= -gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= -gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= -gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= -gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig= -gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= -gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= -gorm.io/gorm v1.25.5/go.mod 
h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 485ed42d..46444375 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -195,8 +195,10 @@ func setDefaults() { viper.SetDefault("jwt.expire_hour", 24) // Default - viper.SetDefault("default.admin_email", "admin@sub2api.com") - viper.SetDefault("default.admin_password", "admin123") + // Admin credentials are created via the setup flow (web wizard / CLI / AUTO_SETUP). + // Do not ship fixed defaults here to avoid insecure "known credentials" in production. + viper.SetDefault("default.admin_email", "") + viper.SetDefault("default.admin_password", "") viper.SetDefault("default.user_concurrency", 5) viper.SetDefault("default.user_balance", 0) viper.SetDefault("default.api_key_prefix", "sk-") diff --git a/backend/internal/infrastructure/database.go b/backend/internal/infrastructure/database.go deleted file mode 100644 index da40bace..00000000 --- a/backend/internal/infrastructure/database.go +++ /dev/null @@ -1,38 +0,0 @@ -package infrastructure - -import ( - "github.com/Wei-Shaw/sub2api/internal/config" - "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" - "github.com/Wei-Shaw/sub2api/internal/repository" - - "gorm.io/driver/postgres" - "gorm.io/gorm" - "gorm.io/gorm/logger" -) - -// InitDB 初始化数据库连接 -func InitDB(cfg *config.Config) (*gorm.DB, error) { - // 初始化时区(在数据库连接之前,确保时区设置正确) - if err := timezone.Init(cfg.Timezone); err != nil { - return nil, err - } - - gormConfig := &gorm.Config{} - if cfg.Server.Mode == "debug" { - gormConfig.Logger = logger.Default.LogMode(logger.Info) - } - - // 使用带时区的 DSN 连接数据库 - db, err := 
gorm.Open(postgres.Open(cfg.Database.DSNWithTimezone(cfg.Timezone)), gormConfig) - if err != nil { - return nil, err - } - - // 自动迁移(始终执行,确保数据库结构与代码同步) - // GORM 的 AutoMigrate 只会添加新字段,不会删除或修改已有字段,是安全的 - if err := repository.AutoMigrate(db); err != nil { - return nil, err - } - - return db, nil -} diff --git a/backend/internal/infrastructure/ent.go b/backend/internal/infrastructure/ent.go new file mode 100644 index 00000000..0e15c471 --- /dev/null +++ b/backend/internal/infrastructure/ent.go @@ -0,0 +1,65 @@ +// Package infrastructure 提供应用程序的基础设施层组件。 +// 包括数据库连接初始化、ORM 客户端管理、Redis 连接、数据库迁移等核心功能。 +package infrastructure + +import ( + "context" + "database/sql" + + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/Wei-Shaw/sub2api/migrations" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "github.com/lib/pq" // PostgreSQL 驱动,通过副作用导入注册驱动 +) + +// InitEnt 初始化 Ent ORM 客户端并返回客户端实例和底层的 *sql.DB。 +// +// 该函数执行以下操作: +// 1. 初始化全局时区设置,确保时间处理一致性 +// 2. 建立 PostgreSQL 数据库连接 +// 3. 自动执行数据库迁移,确保 schema 与代码同步 +// 4. 
创建并返回 Ent 客户端实例 +// +// 重要提示:调用者必须负责关闭返回的 ent.Client(关闭时会自动关闭底层的 driver/db)。 +// +// 参数: +// - cfg: 应用程序配置,包含数据库连接信息和时区设置 +// +// 返回: +// - *ent.Client: Ent ORM 客户端,用于执行数据库操作 +// - *sql.DB: 底层的 SQL 数据库连接,可用于直接执行原生 SQL +// - error: 初始化过程中的错误 +func InitEnt(cfg *config.Config) (*ent.Client, *sql.DB, error) { + // 优先初始化时区设置,确保所有时间操作使用统一的时区。 + // 这对于跨时区部署和日志时间戳的一致性至关重要。 + if err := timezone.Init(cfg.Timezone); err != nil { + return nil, nil, err + } + + // 构建包含时区信息的数据库连接字符串 (DSN)。 + // 时区信息会传递给 PostgreSQL,确保数据库层面的时间处理正确。 + dsn := cfg.Database.DSNWithTimezone(cfg.Timezone) + + // 使用 Ent 的 SQL 驱动打开 PostgreSQL 连接。 + // dialect.Postgres 指定使用 PostgreSQL 方言进行 SQL 生成。 + drv, err := entsql.Open(dialect.Postgres, dsn) + if err != nil { + return nil, nil, err + } + + // 确保数据库 schema 已准备就绪。 + // SQL 迁移文件是 schema 的权威来源(source of truth)。 + // 这种方式比 Ent 的自动迁移更可控,支持复杂的迁移场景。 + if err := applyMigrationsFS(context.Background(), drv.DB(), migrations.FS); err != nil { + _ = drv.Close() // 迁移失败时关闭驱动,避免资源泄露 + return nil, nil, err + } + + // 创建 Ent 客户端,绑定到已配置的数据库驱动。 + client := ent.NewClient(ent.Driver(drv)) + return client, drv.DB(), nil +} diff --git a/backend/internal/infrastructure/migrations_runner.go b/backend/internal/infrastructure/migrations_runner.go new file mode 100644 index 00000000..69919a19 --- /dev/null +++ b/backend/internal/infrastructure/migrations_runner.go @@ -0,0 +1,184 @@ +package infrastructure + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "io/fs" + "sort" + "strings" + + "github.com/Wei-Shaw/sub2api/migrations" +) + +// schemaMigrationsTableDDL 定义迁移记录表的 DDL。 +// 该表用于跟踪已应用的迁移文件及其校验和。 +// - filename: 迁移文件名,作为主键唯一标识每个迁移 +// - checksum: 文件内容的 SHA256 哈希值,用于检测迁移文件是否被篡改 +// - applied_at: 迁移应用时间戳 +const schemaMigrationsTableDDL = ` +CREATE TABLE IF NOT EXISTS schema_migrations ( + filename TEXT PRIMARY KEY, + checksum TEXT NOT NULL, + applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +` + +// migrationsAdvisoryLockID 
是用于序列化迁移操作的 PostgreSQL Advisory Lock ID。 +// 在多实例部署场景下,该锁确保同一时间只有一个实例执行迁移。 +// 任何稳定的 int64 值都可以,只要不与同一数据库中的其他锁冲突即可。 +const migrationsAdvisoryLockID int64 = 694208311321144027 + +// ApplyMigrations 将嵌入的 SQL 迁移文件应用到指定的数据库。 +// +// 该函数可以在每次应用启动时安全调用: +// - 已应用的迁移会被自动跳过(通过校验 filename 判断) +// - 如果迁移文件内容被修改(checksum 不匹配),会返回错误 +// - 使用 PostgreSQL Advisory Lock 确保多实例并发安全 +// +// 参数: +// - ctx: 上下文,用于超时控制和取消 +// - db: 数据库连接 +// +// 返回: +// - error: 迁移过程中的任何错误 +func ApplyMigrations(ctx context.Context, db *sql.DB) error { + if db == nil { + return errors.New("nil sql db") + } + return applyMigrationsFS(ctx, db, migrations.FS) +} + +// applyMigrationsFS 是迁移执行的核心实现。 +// 它从指定的文件系统读取 SQL 迁移文件并按顺序应用。 +// +// 迁移执行流程: +// 1. 获取 PostgreSQL Advisory Lock,防止多实例并发迁移 +// 2. 确保 schema_migrations 表存在 +// 3. 按文件名排序读取所有 .sql 文件 +// 4. 对于每个迁移文件: +// - 计算文件内容的 SHA256 校验和 +// - 检查该迁移是否已应用(通过 filename 查询) +// - 如果已应用,验证校验和是否匹配 +// - 如果未应用,在事务中执行迁移并记录 +// 5. 释放 Advisory Lock +// +// 参数: +// - ctx: 上下文 +// - db: 数据库连接 +// - fsys: 包含迁移文件的文件系统(通常是 embed.FS) +func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { + if db == nil { + return errors.New("nil sql db") + } + + // 获取分布式锁,确保多实例部署时只有一个实例执行迁移。 + // 这是 PostgreSQL 特有的 Advisory Lock 机制。 + if err := pgAdvisoryLock(ctx, db); err != nil { + return err + } + defer func() { + // 无论迁移是否成功,都要释放锁。 + // 使用 context.Background() 确保即使原 ctx 已取消也能释放锁。 + _ = pgAdvisoryUnlock(context.Background(), db) + }() + + // 创建迁移记录表(如果不存在)。 + // 该表记录所有已应用的迁移及其校验和。 + if _, err := db.ExecContext(ctx, schemaMigrationsTableDDL); err != nil { + return fmt.Errorf("create schema_migrations: %w", err) + } + + // 获取所有 .sql 迁移文件并按文件名排序。 + // 命名规范:使用零填充数字前缀(如 001_init.sql, 002_add_users.sql)。 + files, err := fs.Glob(fsys, "*.sql") + if err != nil { + return fmt.Errorf("list migrations: %w", err) + } + sort.Strings(files) // 确保按文件名顺序执行迁移 + + for _, name := range files { + // 读取迁移文件内容 + contentBytes, err := fs.ReadFile(fsys, name) + if err != nil { + return 
fmt.Errorf("read migration %s: %w", name, err) + } + + content := strings.TrimSpace(string(contentBytes)) + if content == "" { + continue // 跳过空文件 + } + + // 计算文件内容的 SHA256 校验和,用于检测文件是否被修改。 + // 这是一种防篡改机制:如果有人修改了已应用的迁移文件,系统会拒绝启动。 + sum := sha256.Sum256([]byte(content)) + checksum := hex.EncodeToString(sum[:]) + + // 检查该迁移是否已经应用 + var existing string + rowErr := db.QueryRowContext(ctx, "SELECT checksum FROM schema_migrations WHERE filename = $1", name).Scan(&existing) + if rowErr == nil { + // 迁移已应用,验证校验和是否匹配 + if existing != checksum { + // 校验和不匹配意味着迁移文件在应用后被修改,这是危险的。 + // 正确的做法是创建新的迁移文件来进行变更。 + return fmt.Errorf("migration %s checksum mismatch (db=%s file=%s)", name, existing, checksum) + } + continue // 迁移已应用且校验和匹配,跳过 + } + if !errors.Is(rowErr, sql.ErrNoRows) { + return fmt.Errorf("check migration %s: %w", name, rowErr) + } + + // 迁移未应用,在事务中执行。 + // 使用事务确保迁移的原子性:要么完全成功,要么完全回滚。 + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("begin migration %s: %w", name, err) + } + + // 执行迁移 SQL + if _, err := tx.ExecContext(ctx, content); err != nil { + _ = tx.Rollback() + return fmt.Errorf("apply migration %s: %w", name, err) + } + + // 记录迁移已完成,保存文件名和校验和 + if _, err := tx.ExecContext(ctx, "INSERT INTO schema_migrations (filename, checksum) VALUES ($1, $2)", name, checksum); err != nil { + _ = tx.Rollback() + return fmt.Errorf("record migration %s: %w", name, err) + } + + // 提交事务 + if err := tx.Commit(); err != nil { + _ = tx.Rollback() + return fmt.Errorf("commit migration %s: %w", name, err) + } + } + + return nil +} + +// pgAdvisoryLock 获取 PostgreSQL Advisory Lock。 +// Advisory Lock 是一种轻量级的锁机制,不与任何特定的数据库对象关联。 +// 它非常适合用于应用层面的分布式锁场景,如迁移序列化。 +func pgAdvisoryLock(ctx context.Context, db *sql.DB) error { + _, err := db.ExecContext(ctx, "SELECT pg_advisory_lock($1)", migrationsAdvisoryLockID) + if err != nil { + return fmt.Errorf("acquire migrations lock: %w", err) + } + return nil +} + +// pgAdvisoryUnlock 释放 PostgreSQL Advisory Lock。 +// 
必须在获取锁后确保释放,否则会阻塞其他实例的迁移操作。 +func pgAdvisoryUnlock(ctx context.Context, db *sql.DB) error { + _, err := db.ExecContext(ctx, "SELECT pg_advisory_unlock($1)", migrationsAdvisoryLockID) + if err != nil { + return fmt.Errorf("release migrations lock: %w", err) + } + return nil +} diff --git a/backend/internal/infrastructure/wire.go b/backend/internal/infrastructure/wire.go index 6f8b4463..1e64640c 100644 --- a/backend/internal/infrastructure/wire.go +++ b/backend/internal/infrastructure/wire.go @@ -1,25 +1,79 @@ package infrastructure import ( + "database/sql" + "errors" + + "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/google/wire" "github.com/redis/go-redis/v9" - "gorm.io/gorm" + + entsql "entgo.io/ent/dialect/sql" ) -// ProviderSet 提供基础设施层的依赖 +// ProviderSet 是基础设施层的 Wire 依赖提供者集合。 +// +// Wire 是 Google 开发的编译时依赖注入工具。ProviderSet 将相关的依赖提供函数 +// 组织在一起,便于在应用程序启动时自动组装依赖关系。 +// +// 包含的提供者: +// - ProvideEnt: 提供 Ent ORM 客户端 +// - ProvideSQLDB: 提供底层 SQL 数据库连接 +// - ProvideRedis: 提供 Redis 客户端 var ProviderSet = wire.NewSet( - ProvideDB, + ProvideEnt, + ProvideSQLDB, ProvideRedis, ) -// ProvideDB 提供数据库连接 -func ProvideDB(cfg *config.Config) (*gorm.DB, error) { - return InitDB(cfg) +// ProvideEnt 为依赖注入提供 Ent 客户端。 +// +// 该函数是 InitEnt 的包装器,符合 Wire 的依赖提供函数签名要求。 +// Wire 会在编译时分析依赖关系,自动生成初始化代码。 +// +// 依赖:config.Config +// 提供:*ent.Client +func ProvideEnt(cfg *config.Config) (*ent.Client, error) { + client, _, err := InitEnt(cfg) + return client, err } -// ProvideRedis 提供 Redis 客户端 +// ProvideSQLDB 从 Ent 客户端提取底层的 *sql.DB 连接。 +// +// 某些 Repository 需要直接执行原生 SQL(如复杂的批量更新、聚合查询), +// 此时需要访问底层的 sql.DB 而不是通过 Ent ORM。 +// +// 设计说明: +// - Ent 底层使用 sql.DB,通过 Driver 接口可以访问 +// - 这种设计允许在同一事务中混用 Ent 和原生 SQL +// +// 依赖:*ent.Client +// 提供:*sql.DB +func ProvideSQLDB(client *ent.Client) (*sql.DB, error) { + if client == nil { + return nil, errors.New("nil ent client") + } + // 从 Ent 客户端获取底层驱动 + drv, ok := client.Driver().(*entsql.Driver) + if !ok { + return 
nil, errors.New("ent driver does not expose *sql.DB") + } + // 返回驱动持有的 sql.DB 实例 + return drv.DB(), nil +} + +// ProvideRedis 为依赖注入提供 Redis 客户端。 +// +// Redis 用于: +// - 分布式锁(如并发控制) +// - 缓存(如用户会话、API 响应缓存) +// - 速率限制 +// - 实时统计数据 +// +// 依赖:config.Config +// 提供:*redis.Client func ProvideRedis(cfg *config.Config) *redis.Client { return InitRedis(cfg) } diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index fe6053ee..92ae09c4 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -1,42 +1,131 @@ +// Package repository 实现数据访问层(Repository Pattern)。 +// +// 该包提供了与数据库交互的所有操作,包括 CRUD、复杂查询和批量操作。 +// 采用 Repository 模式将数据访问逻辑与业务逻辑分离,便于测试和维护。 +// +// 主要特性: +// - 使用 Ent ORM 进行类型安全的数据库操作 +// - 对于复杂查询(如批量更新、聚合统计)使用原生 SQL +// - 提供统一的错误翻译机制,将数据库错误转换为业务错误 +// - 支持软删除,所有查询自动过滤已删除记录 package repository import ( "context" - "errors" + "database/sql" + "encoding/json" + "strconv" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + dbaccount "github.com/Wei-Shaw/sub2api/ent/account" + dbaccountgroup "github.com/Wei-Shaw/sub2api/ent/accountgroup" + dbgroup "github.com/Wei-Shaw/sub2api/ent/group" + dbpredicate "github.com/Wei-Shaw/sub2api/ent/predicate" + dbproxy "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" - "gorm.io/datatypes" - "gorm.io/gorm" - "gorm.io/gorm/clause" + entsql "entgo.io/ent/dialect/sql" ) +// accountRepository 实现 service.AccountRepository 接口。 +// 提供 AI API 账户的完整数据访问功能。 +// +// 设计说明: +// - client: Ent 客户端,用于类型安全的 ORM 操作 +// - sql: 原生 SQL 执行器,用于复杂查询和批量操作 +// - begin: SQL 事务开启器,用于需要事务的操作 type accountRepository struct { - db *gorm.DB + client *dbent.Client // Ent ORM 客户端 + sql sqlExecutor // 原生 SQL 执行接口 + begin sqlBeginner // 事务开启接口 } -func NewAccountRepository(db *gorm.DB) service.AccountRepository { - return &accountRepository{db: db} +// 
NewAccountRepository 创建账户仓储实例。 +// 这是对外暴露的构造函数,返回接口类型以便于依赖注入。 +func NewAccountRepository(client *dbent.Client, sqlDB *sql.DB) service.AccountRepository { + return newAccountRepositoryWithSQL(client, sqlDB) +} + +// newAccountRepositoryWithSQL 是内部构造函数,支持依赖注入 SQL 执行器。 +// 这种设计便于单元测试时注入 mock 对象。 +func newAccountRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *accountRepository { + var beginner sqlBeginner + if b, ok := sqlq.(sqlBeginner); ok { + beginner = b + } + return &accountRepository{client: client, sql: sqlq, begin: beginner} } func (r *accountRepository) Create(ctx context.Context, account *service.Account) error { - m := accountModelFromService(account) - err := r.db.WithContext(ctx).Create(m).Error - if err == nil { - applyAccountModelToService(account, m) + if account == nil { + return nil } - return err + + builder := r.client.Account.Create(). + SetName(account.Name). + SetPlatform(account.Platform). + SetType(account.Type). + SetCredentials(normalizeJSONMap(account.Credentials)). + SetExtra(normalizeJSONMap(account.Extra)). + SetConcurrency(account.Concurrency). + SetPriority(account.Priority). + SetStatus(account.Status). + SetErrorMessage(account.ErrorMessage). 
+ SetSchedulable(account.Schedulable) + + if account.ProxyID != nil { + builder.SetProxyID(*account.ProxyID) + } + if account.LastUsedAt != nil { + builder.SetLastUsedAt(*account.LastUsedAt) + } + if account.RateLimitedAt != nil { + builder.SetRateLimitedAt(*account.RateLimitedAt) + } + if account.RateLimitResetAt != nil { + builder.SetRateLimitResetAt(*account.RateLimitResetAt) + } + if account.OverloadUntil != nil { + builder.SetOverloadUntil(*account.OverloadUntil) + } + if account.SessionWindowStart != nil { + builder.SetSessionWindowStart(*account.SessionWindowStart) + } + if account.SessionWindowEnd != nil { + builder.SetSessionWindowEnd(*account.SessionWindowEnd) + } + if account.SessionWindowStatus != "" { + builder.SetSessionWindowStatus(account.SessionWindowStatus) + } + + created, err := builder.Save(ctx) + if err != nil { + return err + } + + account.ID = created.ID + account.CreatedAt = created.CreatedAt + account.UpdatedAt = created.UpdatedAt + return nil } func (r *accountRepository) GetByID(ctx context.Context, id int64) (*service.Account, error) { - var m accountModel - err := r.db.WithContext(ctx).Preload("Proxy").Preload("AccountGroups.Group").First(&m, id).Error + m, err := r.client.Account.Query().Where(dbaccount.IDEQ(id)).Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrAccountNotFound, nil) } - return accountModelToService(&m), nil + + accounts, err := r.accountsToService(ctx, []*dbent.Account{m}) + if err != nil { + return nil, err + } + if len(accounts) == 0 { + return nil, service.ErrAccountNotFound + } + return &accounts[0], nil } func (r *accountRepository) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*service.Account, error) { @@ -44,31 +133,100 @@ func (r *accountRepository) GetByCRSAccountID(ctx context.Context, crsAccountID return nil, nil } - var m accountModel - err := r.db.WithContext(ctx).Where("extra->>'crs_account_id' = ?", crsAccountID).First(&m).Error + m, err := 
r.client.Account.Query(). + Where(func(s *entsql.Selector) { + s.Where(entsql.ExprP("extra->>'crs_account_id' = ?", crsAccountID)) + }). + Only(ctx) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + if dbent.IsNotFound(err) { return nil, nil } return nil, err } - return accountModelToService(&m), nil + + accounts, err := r.accountsToService(ctx, []*dbent.Account{m}) + if err != nil { + return nil, err + } + if len(accounts) == 0 { + return nil, nil + } + return &accounts[0], nil } func (r *accountRepository) Update(ctx context.Context, account *service.Account) error { - m := accountModelFromService(account) - err := r.db.WithContext(ctx).Save(m).Error - if err == nil { - applyAccountModelToService(account, m) + if account == nil { + return nil } - return err + + builder := r.client.Account.UpdateOneID(account.ID). + SetName(account.Name). + SetPlatform(account.Platform). + SetType(account.Type). + SetCredentials(normalizeJSONMap(account.Credentials)). + SetExtra(normalizeJSONMap(account.Extra)). + SetConcurrency(account.Concurrency). + SetPriority(account.Priority). + SetStatus(account.Status). + SetErrorMessage(account.ErrorMessage). 
+ SetSchedulable(account.Schedulable) + + if account.ProxyID != nil { + builder.SetProxyID(*account.ProxyID) + } else { + builder.ClearProxyID() + } + if account.LastUsedAt != nil { + builder.SetLastUsedAt(*account.LastUsedAt) + } else { + builder.ClearLastUsedAt() + } + if account.RateLimitedAt != nil { + builder.SetRateLimitedAt(*account.RateLimitedAt) + } else { + builder.ClearRateLimitedAt() + } + if account.RateLimitResetAt != nil { + builder.SetRateLimitResetAt(*account.RateLimitResetAt) + } else { + builder.ClearRateLimitResetAt() + } + if account.OverloadUntil != nil { + builder.SetOverloadUntil(*account.OverloadUntil) + } else { + builder.ClearOverloadUntil() + } + if account.SessionWindowStart != nil { + builder.SetSessionWindowStart(*account.SessionWindowStart) + } else { + builder.ClearSessionWindowStart() + } + if account.SessionWindowEnd != nil { + builder.SetSessionWindowEnd(*account.SessionWindowEnd) + } else { + builder.ClearSessionWindowEnd() + } + if account.SessionWindowStatus != "" { + builder.SetSessionWindowStatus(account.SessionWindowStatus) + } else { + builder.ClearSessionWindowStatus() + } + + updated, err := builder.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrAccountNotFound, nil) + } + account.UpdatedAt = updated.UpdatedAt + return nil } func (r *accountRepository) Delete(ctx context.Context, id int64) error { - if err := r.db.WithContext(ctx).Where("account_id = ?", id).Delete(&accountGroupModel{}).Error; err != nil { + if _, err := r.client.AccountGroup.Delete().Where(dbaccountgroup.AccountIDEQ(id)).Exec(ctx); err != nil { return err } - return r.db.WithContext(ctx).Delete(&accountModel{}, id).Error + _, err := r.client.Account.Delete().Where(dbaccount.IDEQ(id)).Exec(ctx) + return err } func (r *accountRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Account, *pagination.PaginationResult, error) { @@ -76,99 +234,84 @@ func (r *accountRepository) List(ctx 
context.Context, params pagination.Paginati } func (r *accountRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]service.Account, *pagination.PaginationResult, error) { - var accounts []accountModel - var total int64 - - db := r.db.WithContext(ctx).Model(&accountModel{}) + q := r.client.Account.Query() if platform != "" { - db = db.Where("platform = ?", platform) + q = q.Where(dbaccount.PlatformEQ(platform)) } if accountType != "" { - db = db.Where("type = ?", accountType) + q = q.Where(dbaccount.TypeEQ(accountType)) } if status != "" { - db = db.Where("status = ?", status) + q = q.Where(dbaccount.StatusEQ(status)) } if search != "" { - searchPattern := "%" + search + "%" - db = db.Where("name ILIKE ?", searchPattern) + q = q.Where(dbaccount.NameContainsFold(search)) } - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Preload("Proxy").Preload("AccountGroups.Group").Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&accounts).Error; err != nil { + accounts, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(dbaccount.FieldID)). + All(ctx) + if err != nil { return nil, nil, err } - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) + outAccounts, err := r.accountsToService(ctx, accounts) + if err != nil { + return nil, nil, err } - - return outAccounts, paginationResultFromTotal(total, params), nil + return outAccounts, paginationResultFromTotal(int64(total), params), nil } func (r *accountRepository) ListByGroup(ctx context.Context, groupID int64) ([]service.Account, error) { - var accounts []accountModel - err := r.db.WithContext(ctx). - Joins("JOIN account_groups ON account_groups.account_id = accounts.id"). - Where("account_groups.group_id = ? 
AND accounts.status = ?", groupID, service.StatusActive). - Preload("Proxy"). - Order("account_groups.priority ASC, accounts.priority ASC"). - Find(&accounts).Error + accounts, err := r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{ + status: service.StatusActive, + }) if err != nil { return nil, err } - - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return accounts, nil } func (r *accountRepository) ListActive(ctx context.Context) ([]service.Account, error) { - var accounts []accountModel - err := r.db.WithContext(ctx). - Where("status = ?", service.StatusActive). - Preload("Proxy"). - Order("priority ASC"). - Find(&accounts).Error + accounts, err := r.client.Account.Query(). + Where(dbaccount.StatusEQ(service.StatusActive)). + Order(dbent.Asc(dbaccount.FieldPriority)). + All(ctx) if err != nil { return nil, err } - - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.accountsToService(ctx, accounts) } func (r *accountRepository) ListByPlatform(ctx context.Context, platform string) ([]service.Account, error) { - var accounts []accountModel - err := r.db.WithContext(ctx). - Where("platform = ? AND status = ?", platform, service.StatusActive). - Preload("Proxy"). - Order("priority ASC"). - Find(&accounts).Error + accounts, err := r.client.Account.Query(). + Where( + dbaccount.PlatformEQ(platform), + dbaccount.StatusEQ(service.StatusActive), + ). + Order(dbent.Asc(dbaccount.FieldPriority)). 
+ All(ctx) if err != nil { return nil, err } - - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.accountsToService(ctx, accounts) } func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error { now := time.Now() - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id).Update("last_used_at", now).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetLastUsedAt(now). + Save(ctx) + return err } func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { @@ -176,63 +319,72 @@ func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map return nil } - var caseSql = "UPDATE accounts SET last_used_at = CASE id" - var args []any - var ids []int64 + ids := make([]int64, 0, len(updates)) + args := make([]any, 0, len(updates)*2+1) + caseSQL := "UPDATE accounts SET last_used_at = CASE id" + idx := 1 for id, ts := range updates { - caseSql += " WHEN ? THEN CAST(? AS TIMESTAMP)" + caseSQL += " WHEN $" + itoa(idx) + " THEN $" + itoa(idx+1) args = append(args, id, ts) ids = append(ids, id) + idx += 2 } - caseSql += " END WHERE id IN ?" - args = append(args, ids) + caseSQL += " END, updated_at = NOW() WHERE id = ANY($" + itoa(idx) + ") AND deleted_at IS NULL" + args = append(args, pq.Array(ids)) - return r.db.WithContext(ctx).Exec(caseSql, args...).Error + _, err := r.sql.ExecContext(ctx, caseSQL, args...) + return err } func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg string) error { - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Updates(map[string]any{ - "status": service.StatusError, - "error_message": errorMsg, - }).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetStatus(service.StatusError). 
+ SetErrorMessage(errorMsg). + Save(ctx) + return err } func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error { - ag := &accountGroupModel{ - AccountID: accountID, - GroupID: groupID, - Priority: priority, - } - return r.db.WithContext(ctx).Create(ag).Error + _, err := r.client.AccountGroup.Create(). + SetAccountID(accountID). + SetGroupID(groupID). + SetPriority(priority). + Save(ctx) + return err } func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, groupID int64) error { - return r.db.WithContext(ctx).Where("account_id = ? AND group_id = ?", accountID, groupID). - Delete(&accountGroupModel{}).Error + _, err := r.client.AccountGroup.Delete(). + Where( + dbaccountgroup.AccountIDEQ(accountID), + dbaccountgroup.GroupIDEQ(groupID), + ). + Exec(ctx) + return err } func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]service.Group, error) { - var groups []groupModel - err := r.db.WithContext(ctx). - Joins("JOIN account_groups ON account_groups.group_id = groups.id"). - Where("account_groups.account_id = ?", accountID). - Find(&groups).Error + groups, err := r.client.Group.Query(). + Where( + dbgroup.HasAccountsWith(dbaccount.IDEQ(accountID)), + ). 
+ All(ctx) if err != nil { return nil, err } outGroups := make([]service.Group, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *groupModelToService(&groups[i])) + outGroups = append(outGroups, *groupEntityToService(groups[i])) } return outGroups, nil } func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { - if err := r.db.WithContext(ctx).Where("account_id = ?", accountID).Delete(&accountGroupModel{}).Error; err != nil { + if _, err := r.client.AccountGroup.Delete().Where(dbaccountgroup.AccountIDEQ(accountID)).Exec(ctx); err != nil { return err } @@ -240,142 +392,117 @@ func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, gro return nil } - accountGroups := make([]accountGroupModel, 0, len(groupIDs)) + builders := make([]*dbent.AccountGroupCreate, 0, len(groupIDs)) for i, groupID := range groupIDs { - accountGroups = append(accountGroups, accountGroupModel{ - AccountID: accountID, - GroupID: groupID, - Priority: i + 1, - }) + builders = append(builders, r.client.AccountGroup.Create(). + SetAccountID(accountID). + SetGroupID(groupID). + SetPriority(i+1), + ) } - return r.db.WithContext(ctx).Create(&accountGroups).Error + + _, err := r.client.AccountGroup.CreateBulk(builders...).Save(ctx) + return err } func (r *accountRepository) ListSchedulable(ctx context.Context) ([]service.Account, error) { - var accounts []accountModel now := time.Now() - err := r.db.WithContext(ctx). - Where("status = ? AND schedulable = ?", service.StatusActive, true). - Where("(overload_until IS NULL OR overload_until <= ?)", now). - Where("(rate_limit_reset_at IS NULL OR rate_limit_reset_at <= ?)", now). - Preload("Proxy"). - Order("priority ASC"). - Find(&accounts).Error + accounts, err := r.client.Account.Query(). 
+ Where( + dbaccount.StatusEQ(service.StatusActive), + dbaccount.SchedulableEQ(true), + dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)), + dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)), + ). + Order(dbent.Asc(dbaccount.FieldPriority)). + All(ctx) if err != nil { return nil, err } - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.accountsToService(ctx, accounts) } func (r *accountRepository) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]service.Account, error) { - var accounts []accountModel - now := time.Now() - err := r.db.WithContext(ctx). - Joins("JOIN account_groups ON account_groups.account_id = accounts.id"). - Where("account_groups.group_id = ?", groupID). - Where("accounts.status = ? AND accounts.schedulable = ?", service.StatusActive, true). - Where("(accounts.overload_until IS NULL OR accounts.overload_until <= ?)", now). - Where("(accounts.rate_limit_reset_at IS NULL OR accounts.rate_limit_reset_at <= ?)", now). - Preload("Proxy"). - Order("account_groups.priority ASC, accounts.priority ASC"). - Find(&accounts).Error - if err != nil { - return nil, err - } - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{ + status: service.StatusActive, + schedulable: true, + }) } func (r *accountRepository) ListSchedulableByPlatform(ctx context.Context, platform string) ([]service.Account, error) { - var accounts []accountModel now := time.Now() - err := r.db.WithContext(ctx). - Where("platform = ?", platform). - Where("status = ? AND schedulable = ?", service.StatusActive, true). 
- Where("(overload_until IS NULL OR overload_until <= ?)", now). - Where("(rate_limit_reset_at IS NULL OR rate_limit_reset_at <= ?)", now). - Preload("Proxy"). - Order("priority ASC"). - Find(&accounts).Error + accounts, err := r.client.Account.Query(). + Where( + dbaccount.PlatformEQ(platform), + dbaccount.StatusEQ(service.StatusActive), + dbaccount.SchedulableEQ(true), + dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)), + dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)), + ). + Order(dbent.Asc(dbaccount.FieldPriority)). + All(ctx) if err != nil { return nil, err } - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.accountsToService(ctx, accounts) } func (r *accountRepository) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]service.Account, error) { - var accounts []accountModel - now := time.Now() - err := r.db.WithContext(ctx). - Joins("JOIN account_groups ON account_groups.account_id = accounts.id"). - Where("account_groups.group_id = ?", groupID). - Where("accounts.platform = ?", platform). - Where("accounts.status = ? AND accounts.schedulable = ?", service.StatusActive, true). - Where("(accounts.overload_until IS NULL OR accounts.overload_until <= ?)", now). - Where("(accounts.rate_limit_reset_at IS NULL OR accounts.rate_limit_reset_at <= ?)", now). - Preload("Proxy"). - Order("account_groups.priority ASC, accounts.priority ASC"). 
- Find(&accounts).Error - if err != nil { - return nil, err - } - outAccounts := make([]service.Account, 0, len(accounts)) - for i := range accounts { - outAccounts = append(outAccounts, *accountModelToService(&accounts[i])) - } - return outAccounts, nil + return r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{ + status: service.StatusActive, + schedulable: true, + platform: platform, + }) } func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { now := time.Now() - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Updates(map[string]any{ - "rate_limited_at": now, - "rate_limit_reset_at": resetAt, - }).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetRateLimitedAt(now). + SetRateLimitResetAt(resetAt). + Save(ctx) + return err } func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until time.Time) error { - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Update("overload_until", until).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetOverloadUntil(until). + Save(ctx) + return err } func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Updates(map[string]any{ - "rate_limited_at": nil, - "rate_limit_reset_at": nil, - "overload_until": nil, - }).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + ClearRateLimitedAt(). + ClearRateLimitResetAt(). + ClearOverloadUntil(). + Save(ctx) + return err } func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { - updates := map[string]any{ - "session_window_status": status, - } + builder := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). 
+ SetSessionWindowStatus(status) if start != nil { - updates["session_window_start"] = start + builder.SetSessionWindowStart(*start) } if end != nil { - updates["session_window_end"] = end + builder.SetSessionWindowEnd(*end) } - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id).Updates(updates).Error + _, err := builder.Save(ctx) + return err } func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Update("schedulable", schedulable).Error + _, err := r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetSchedulable(schedulable). + Save(ctx) + return err } func (r *accountRepository) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error { @@ -383,20 +510,24 @@ func (r *accountRepository) UpdateExtra(ctx context.Context, id int64, updates m return nil } - var account accountModel - if err := r.db.WithContext(ctx).Select("extra").Where("id = ?", id).First(&account).Error; err != nil { - return err + accountExtra, err := r.client.Account.Query(). + Where(dbaccount.IDEQ(id)). + Select(dbaccount.FieldExtra). + Only(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrAccountNotFound, nil) } - if account.Extra == nil { - account.Extra = datatypes.JSONMap{} - } + extra := normalizeJSONMap(accountExtra.Extra) for k, v := range updates { - account.Extra[k] = v + extra[k] = v } - return r.db.WithContext(ctx).Model(&accountModel{}).Where("id = ?", id). - Update("extra", account.Extra).Error + _, err = r.client.Account.Update(). + Where(dbaccount.IDEQ(id)). + SetExtra(extra). 
+ Save(ctx) + return err } func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates service.AccountBulkUpdate) (int64, error) { @@ -404,129 +535,260 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates return 0, nil } - updateMap := map[string]any{} + setClauses := make([]string, 0, 8) + args := make([]any, 0, 8) + idx := 1 if updates.Name != nil { - updateMap["name"] = *updates.Name + setClauses = append(setClauses, "name = $"+itoa(idx)) + args = append(args, *updates.Name) + idx++ } if updates.ProxyID != nil { - updateMap["proxy_id"] = updates.ProxyID + setClauses = append(setClauses, "proxy_id = $"+itoa(idx)) + args = append(args, *updates.ProxyID) + idx++ } if updates.Concurrency != nil { - updateMap["concurrency"] = *updates.Concurrency + setClauses = append(setClauses, "concurrency = $"+itoa(idx)) + args = append(args, *updates.Concurrency) + idx++ } if updates.Priority != nil { - updateMap["priority"] = *updates.Priority + setClauses = append(setClauses, "priority = $"+itoa(idx)) + args = append(args, *updates.Priority) + idx++ } if updates.Status != nil { - updateMap["status"] = *updates.Status + setClauses = append(setClauses, "status = $"+itoa(idx)) + args = append(args, *updates.Status) + idx++ } + // JSONB 需要合并而非覆盖,使用 raw SQL 保持旧行为。 if len(updates.Credentials) > 0 { - updateMap["credentials"] = gorm.Expr("COALESCE(credentials,'{}') || ?", datatypes.JSONMap(updates.Credentials)) + payload, err := json.Marshal(updates.Credentials) + if err != nil { + return 0, err + } + setClauses = append(setClauses, "credentials = COALESCE(credentials, '{}'::jsonb) || $"+itoa(idx)+"::jsonb") + args = append(args, payload) + idx++ } if len(updates.Extra) > 0 { - updateMap["extra"] = gorm.Expr("COALESCE(extra,'{}') || ?", datatypes.JSONMap(updates.Extra)) + payload, err := json.Marshal(updates.Extra) + if err != nil { + return 0, err + } + setClauses = append(setClauses, "extra = COALESCE(extra, '{}'::jsonb) || 
$"+itoa(idx)+"::jsonb") + args = append(args, payload) + idx++ } - if len(updateMap) == 0 { + if len(setClauses) == 0 { return 0, nil } - result := r.db.WithContext(ctx). - Model(&accountModel{}). - Where("id IN ?", ids). - Clauses(clause.Returning{}). - Updates(updateMap) + setClauses = append(setClauses, "updated_at = NOW()") - return result.RowsAffected, result.Error -} + query := "UPDATE accounts SET " + joinClauses(setClauses, ", ") + " WHERE id = ANY($" + itoa(idx) + ") AND deleted_at IS NULL" + args = append(args, pq.Array(ids)) -type accountModel struct { - ID int64 `gorm:"primaryKey"` - Name string `gorm:"size:100;not null"` - Platform string `gorm:"size:50;not null"` - Type string `gorm:"size:20;not null"` - Credentials datatypes.JSONMap `gorm:"type:jsonb;default:'{}'"` - Extra datatypes.JSONMap `gorm:"type:jsonb;default:'{}'"` - ProxyID *int64 `gorm:"index"` - Concurrency int `gorm:"default:3;not null"` - Priority int `gorm:"default:50;not null"` - Status string `gorm:"size:20;default:active;not null"` - ErrorMessage string `gorm:"type:text"` - LastUsedAt *time.Time `gorm:"index"` - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - DeletedAt gorm.DeletedAt `gorm:"index"` - - Schedulable bool `gorm:"default:true;not null"` - - RateLimitedAt *time.Time `gorm:"index"` - RateLimitResetAt *time.Time `gorm:"index"` - OverloadUntil *time.Time `gorm:"index"` - - SessionWindowStart *time.Time - SessionWindowEnd *time.Time - SessionWindowStatus string `gorm:"size:20"` - - Proxy *proxyModel `gorm:"foreignKey:ProxyID"` - AccountGroups []accountGroupModel `gorm:"foreignKey:AccountID"` -} - -func (accountModel) TableName() string { return "accounts" } - -type accountGroupModel struct { - AccountID int64 `gorm:"primaryKey"` - GroupID int64 `gorm:"primaryKey"` - Priority int `gorm:"default:50;not null"` - CreatedAt time.Time `gorm:"not null"` - - Account *accountModel `gorm:"foreignKey:AccountID"` - Group *groupModel 
`gorm:"foreignKey:GroupID"` -} - -func (accountGroupModel) TableName() string { return "account_groups" } - -func accountGroupModelToService(m *accountGroupModel) *service.AccountGroup { - if m == nil { - return nil + result, err := r.sql.ExecContext(ctx, query, args...) + if err != nil { + return 0, err } - return &service.AccountGroup{ - AccountID: m.AccountID, - GroupID: m.GroupID, - Priority: m.Priority, - CreatedAt: m.CreatedAt, - Account: accountModelToService(m.Account), - Group: groupModelToService(m.Group), + rows, err := result.RowsAffected() + if err != nil { + return 0, err } + return rows, nil } -func accountModelToService(m *accountModel) *service.Account { +type accountGroupQueryOptions struct { + status string + schedulable bool + platform string +} + +func (r *accountRepository) queryAccountsByGroup(ctx context.Context, groupID int64, opts accountGroupQueryOptions) ([]service.Account, error) { + q := r.client.AccountGroup.Query(). + Where(dbaccountgroup.GroupIDEQ(groupID)) + + preds := make([]dbpredicate.Account, 0, 6) + preds = append(preds, dbaccount.DeletedAtIsNil()) + if opts.status != "" { + preds = append(preds, dbaccount.StatusEQ(opts.status)) + } + if opts.platform != "" { + preds = append(preds, dbaccount.PlatformEQ(opts.platform)) + } + if opts.schedulable { + now := time.Now() + preds = append(preds, + dbaccount.SchedulableEQ(true), + dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)), + dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)), + ) + } + + if len(preds) > 0 { + q = q.Where(dbaccountgroup.HasAccountWith(preds...)) + } + + groups, err := q. + Order( + dbaccountgroup.ByPriority(), + dbaccountgroup.ByAccountField(dbaccount.FieldPriority), + ). + WithAccount(). 
+ All(ctx) + if err != nil { + return nil, err + } + + orderedIDs := make([]int64, 0, len(groups)) + accountMap := make(map[int64]*dbent.Account, len(groups)) + for _, ag := range groups { + if ag.Edges.Account == nil { + continue + } + if _, exists := accountMap[ag.AccountID]; exists { + continue + } + accountMap[ag.AccountID] = ag.Edges.Account + orderedIDs = append(orderedIDs, ag.AccountID) + } + + accounts := make([]*dbent.Account, 0, len(orderedIDs)) + for _, id := range orderedIDs { + if acc, ok := accountMap[id]; ok { + accounts = append(accounts, acc) + } + } + + return r.accountsToService(ctx, accounts) +} + +func (r *accountRepository) accountsToService(ctx context.Context, accounts []*dbent.Account) ([]service.Account, error) { + if len(accounts) == 0 { + return []service.Account{}, nil + } + + accountIDs := make([]int64, 0, len(accounts)) + proxyIDs := make([]int64, 0, len(accounts)) + for _, acc := range accounts { + accountIDs = append(accountIDs, acc.ID) + if acc.ProxyID != nil { + proxyIDs = append(proxyIDs, *acc.ProxyID) + } + } + + proxyMap, err := r.loadProxies(ctx, proxyIDs) + if err != nil { + return nil, err + } + groupsByAccount, groupIDsByAccount, accountGroupsByAccount, err := r.loadAccountGroups(ctx, accountIDs) + if err != nil { + return nil, err + } + + outAccounts := make([]service.Account, 0, len(accounts)) + for _, acc := range accounts { + out := accountEntityToService(acc) + if out == nil { + continue + } + if acc.ProxyID != nil { + if proxy, ok := proxyMap[*acc.ProxyID]; ok { + out.Proxy = proxy + } + } + if groups, ok := groupsByAccount[acc.ID]; ok { + out.Groups = groups + } + if groupIDs, ok := groupIDsByAccount[acc.ID]; ok { + out.GroupIDs = groupIDs + } + if ags, ok := accountGroupsByAccount[acc.ID]; ok { + out.AccountGroups = ags + } + outAccounts = append(outAccounts, *out) + } + + return outAccounts, nil +} + +func (r *accountRepository) loadProxies(ctx context.Context, proxyIDs []int64) (map[int64]*service.Proxy, error) { 
+ proxyMap := make(map[int64]*service.Proxy) + if len(proxyIDs) == 0 { + return proxyMap, nil + } + + proxies, err := r.client.Proxy.Query().Where(dbproxy.IDIn(proxyIDs...)).All(ctx) + if err != nil { + return nil, err + } + + for _, p := range proxies { + proxyMap[p.ID] = proxyEntityToService(p) + } + return proxyMap, nil +} + +func (r *accountRepository) loadAccountGroups(ctx context.Context, accountIDs []int64) (map[int64][]*service.Group, map[int64][]int64, map[int64][]service.AccountGroup, error) { + groupsByAccount := make(map[int64][]*service.Group) + groupIDsByAccount := make(map[int64][]int64) + accountGroupsByAccount := make(map[int64][]service.AccountGroup) + + if len(accountIDs) == 0 { + return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil + } + + entries, err := r.client.AccountGroup.Query(). + Where(dbaccountgroup.AccountIDIn(accountIDs...)). + WithGroup(). + Order(dbaccountgroup.ByAccountID(), dbaccountgroup.ByPriority()). + All(ctx) + if err != nil { + return nil, nil, nil, err + } + + for _, ag := range entries { + groupSvc := groupEntityToService(ag.Edges.Group) + agSvc := service.AccountGroup{ + AccountID: ag.AccountID, + GroupID: ag.GroupID, + Priority: ag.Priority, + CreatedAt: ag.CreatedAt, + Group: groupSvc, + } + accountGroupsByAccount[ag.AccountID] = append(accountGroupsByAccount[ag.AccountID], agSvc) + groupIDsByAccount[ag.AccountID] = append(groupIDsByAccount[ag.AccountID], ag.GroupID) + if groupSvc != nil { + groupsByAccount[ag.AccountID] = append(groupsByAccount[ag.AccountID], groupSvc) + } + } + + return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil +} + +func accountEntityToService(m *dbent.Account) *service.Account { if m == nil { return nil } - var credentials map[string]any - if m.Credentials != nil { - credentials = map[string]any(m.Credentials) - } - - var extra map[string]any - if m.Extra != nil { - extra = map[string]any(m.Extra) - } - - account := &service.Account{ + return &service.Account{ 
ID: m.ID, Name: m.Name, Platform: m.Platform, Type: m.Type, - Credentials: credentials, - Extra: extra, + Credentials: copyJSONMap(m.Credentials), + Extra: copyJSONMap(m.Extra), ProxyID: m.ProxyID, Concurrency: m.Concurrency, Priority: m.Priority, Status: m.Status, - ErrorMessage: m.ErrorMessage, + ErrorMessage: derefString(m.ErrorMessage), LastUsedAt: m.LastUsedAt, CreatedAt: m.CreatedAt, UpdatedAt: m.UpdatedAt, @@ -536,75 +798,39 @@ func accountModelToService(m *accountModel) *service.Account { OverloadUntil: m.OverloadUntil, SessionWindowStart: m.SessionWindowStart, SessionWindowEnd: m.SessionWindowEnd, - SessionWindowStatus: m.SessionWindowStatus, - Proxy: proxyModelToService(m.Proxy), + SessionWindowStatus: derefString(m.SessionWindowStatus), } - - if len(m.AccountGroups) > 0 { - account.AccountGroups = make([]service.AccountGroup, 0, len(m.AccountGroups)) - account.GroupIDs = make([]int64, 0, len(m.AccountGroups)) - account.Groups = make([]*service.Group, 0, len(m.AccountGroups)) - for i := range m.AccountGroups { - ag := accountGroupModelToService(&m.AccountGroups[i]) - if ag == nil { - continue - } - account.AccountGroups = append(account.AccountGroups, *ag) - account.GroupIDs = append(account.GroupIDs, ag.GroupID) - if ag.Group != nil { - account.Groups = append(account.Groups, ag.Group) - } - } - } - - return account } -func accountModelFromService(a *service.Account) *accountModel { - if a == nil { +func normalizeJSONMap(in map[string]any) map[string]any { + if in == nil { + return map[string]any{} + } + return in +} + +func copyJSONMap(in map[string]any) map[string]any { + if in == nil { return nil } - - var credentials datatypes.JSONMap - if a.Credentials != nil { - credentials = datatypes.JSONMap(a.Credentials) - } - - var extra datatypes.JSONMap - if a.Extra != nil { - extra = datatypes.JSONMap(a.Extra) - } - - return &accountModel{ - ID: a.ID, - Name: a.Name, - Platform: a.Platform, - Type: a.Type, - Credentials: credentials, - Extra: extra, - 
ProxyID: a.ProxyID, - Concurrency: a.Concurrency, - Priority: a.Priority, - Status: a.Status, - ErrorMessage: a.ErrorMessage, - LastUsedAt: a.LastUsedAt, - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - Schedulable: a.Schedulable, - RateLimitedAt: a.RateLimitedAt, - RateLimitResetAt: a.RateLimitResetAt, - OverloadUntil: a.OverloadUntil, - SessionWindowStart: a.SessionWindowStart, - SessionWindowEnd: a.SessionWindowEnd, - SessionWindowStatus: a.SessionWindowStatus, + out := make(map[string]any, len(in)) + for k, v := range in { + out[k] = v } + return out } -func applyAccountModelToService(account *service.Account, m *accountModel) { - if account == nil || m == nil { - return +func joinClauses(clauses []string, sep string) string { + if len(clauses) == 0 { + return "" } - account.ID = m.ID - account.CreatedAt = m.CreatedAt - account.UpdatedAt = m.UpdatedAt + out := clauses[0] + for i := 1; i < len(clauses); i++ { + out += sep + clauses[i] + } + return out +} + +func itoa(v int) string { + return strconv.Itoa(v) } diff --git a/backend/internal/repository/account_repo_integration_test.go b/backend/internal/repository/account_repo_integration_test.go index d35ce053..41874549 100644 --- a/backend/internal/repository/account_repo_integration_test.go +++ b/backend/internal/repository/account_repo_integration_test.go @@ -4,27 +4,31 @@ package repository import ( "context" + "database/sql" "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/datatypes" - "gorm.io/gorm" ) type AccountRepoSuite struct { suite.Suite ctx context.Context - db *gorm.DB + tx *sql.Tx + client *dbent.Client repo *accountRepository } func (s *AccountRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewAccountRepository(s.db).(*accountRepository) + client, tx 
:= testEntSQLTx(s.T()) + s.client = client + s.tx = tx + s.repo = newAccountRepositoryWithSQL(client, tx) } func TestAccountRepoSuite(t *testing.T) { @@ -61,7 +65,7 @@ func (s *AccountRepoSuite) TestGetByID_NotFound() { } func (s *AccountRepoSuite) TestUpdate() { - account := accountModelToService(mustCreateAccount(s.T(), s.db, &accountModel{Name: "original"})) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "original"}) account.Name = "updated" err := s.repo.Update(s.ctx, account) @@ -73,7 +77,7 @@ func (s *AccountRepoSuite) TestUpdate() { } func (s *AccountRepoSuite) TestDelete() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "to-delete"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "to-delete"}) err := s.repo.Delete(s.ctx, account.ID) s.Require().NoError(err, "Delete") @@ -83,23 +87,23 @@ func (s *AccountRepoSuite) TestDelete() { } func (s *AccountRepoSuite) TestDelete_WithGroupBindings() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-del"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-del"}) - mustBindAccountToGroup(s.T(), s.db, account.ID, group.ID, 1) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-del"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-del"}) + mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1) err := s.repo.Delete(s.ctx, account.ID) s.Require().NoError(err, "Delete should cascade remove bindings") - var count int64 - s.db.Model(&accountGroupModel{}).Where("account_id = ?", account.ID).Count(&count) + count, err := s.client.AccountGroup.Query().Where(accountgroup.AccountIDEQ(account.ID)).Count(s.ctx) + s.Require().NoError(err) s.Require().Zero(count, "expected bindings to be removed") } // --- List / ListWithFilters --- func (s *AccountRepoSuite) TestList() { - mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc1"}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: 
"acc2"}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc1"}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc2"}) accounts, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "List") @@ -110,7 +114,7 @@ func (s *AccountRepoSuite) TestList() { func (s *AccountRepoSuite) TestListWithFilters() { tests := []struct { name string - setup func(db *gorm.DB) + setup func(client *dbent.Client) platform string accType string status string @@ -120,9 +124,9 @@ func (s *AccountRepoSuite) TestListWithFilters() { }{ { name: "filter_by_platform", - setup: func(db *gorm.DB) { - mustCreateAccount(s.T(), db, &accountModel{Name: "a1", Platform: service.PlatformAnthropic}) - mustCreateAccount(s.T(), db, &accountModel{Name: "a2", Platform: service.PlatformOpenAI}) + setup: func(client *dbent.Client) { + mustCreateAccount(s.T(), client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic}) + mustCreateAccount(s.T(), client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI}) }, platform: service.PlatformOpenAI, wantCount: 1, @@ -132,9 +136,9 @@ func (s *AccountRepoSuite) TestListWithFilters() { }, { name: "filter_by_type", - setup: func(db *gorm.DB) { - mustCreateAccount(s.T(), db, &accountModel{Name: "t1", Type: service.AccountTypeOAuth}) - mustCreateAccount(s.T(), db, &accountModel{Name: "t2", Type: service.AccountTypeApiKey}) + setup: func(client *dbent.Client) { + mustCreateAccount(s.T(), client, &service.Account{Name: "t1", Type: service.AccountTypeOAuth}) + mustCreateAccount(s.T(), client, &service.Account{Name: "t2", Type: service.AccountTypeApiKey}) }, accType: service.AccountTypeApiKey, wantCount: 1, @@ -144,9 +148,9 @@ func (s *AccountRepoSuite) TestListWithFilters() { }, { name: "filter_by_status", - setup: func(db *gorm.DB) { - mustCreateAccount(s.T(), db, &accountModel{Name: "s1", Status: service.StatusActive}) - mustCreateAccount(s.T(), db, &accountModel{Name: 
"s2", Status: service.StatusDisabled}) + setup: func(client *dbent.Client) { + mustCreateAccount(s.T(), client, &service.Account{Name: "s1", Status: service.StatusActive}) + mustCreateAccount(s.T(), client, &service.Account{Name: "s2", Status: service.StatusDisabled}) }, status: service.StatusDisabled, wantCount: 1, @@ -156,9 +160,9 @@ func (s *AccountRepoSuite) TestListWithFilters() { }, { name: "filter_by_search", - setup: func(db *gorm.DB) { - mustCreateAccount(s.T(), db, &accountModel{Name: "alpha-account"}) - mustCreateAccount(s.T(), db, &accountModel{Name: "beta-account"}) + setup: func(client *dbent.Client) { + mustCreateAccount(s.T(), client, &service.Account{Name: "alpha-account"}) + mustCreateAccount(s.T(), client, &service.Account{Name: "beta-account"}) }, search: "alpha", wantCount: 1, @@ -171,11 +175,11 @@ func (s *AccountRepoSuite) TestListWithFilters() { for _, tt := range tests { s.Run(tt.name, func() { // 每个 case 重新获取隔离资源 - db := testTx(s.T()) - repo := NewAccountRepository(db).(*accountRepository) + client, tx := testEntSQLTx(s.T()) + repo := newAccountRepositoryWithSQL(client, tx) ctx := context.Background() - tt.setup(db) + tt.setup(client) accounts, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, tt.platform, tt.accType, tt.status, tt.search) s.Require().NoError(err) @@ -190,11 +194,11 @@ func (s *AccountRepoSuite) TestListWithFilters() { // --- ListByGroup / ListActive / ListByPlatform --- func (s *AccountRepoSuite) TestListByGroup() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-list"}) - acc1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", Status: service.StatusActive}) - acc2 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", Status: service.StatusActive}) - mustBindAccountToGroup(s.T(), s.db, acc1.ID, group.ID, 2) - mustBindAccountToGroup(s.T(), s.db, acc2.ID, group.ID, 1) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-list"}) + acc1 := 
mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Status: service.StatusActive}) + acc2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Status: service.StatusActive}) + mustBindAccountToGroup(s.T(), s.client, acc1.ID, group.ID, 2) + mustBindAccountToGroup(s.T(), s.client, acc2.ID, group.ID, 1) accounts, err := s.repo.ListByGroup(s.ctx, group.ID) s.Require().NoError(err, "ListByGroup") @@ -204,8 +208,8 @@ func (s *AccountRepoSuite) TestListByGroup() { } func (s *AccountRepoSuite) TestListActive() { - mustCreateAccount(s.T(), s.db, &accountModel{Name: "active1", Status: service.StatusActive}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "inactive1", Status: service.StatusDisabled}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "active1", Status: service.StatusActive}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "inactive1", Status: service.StatusDisabled}) accounts, err := s.repo.ListActive(s.ctx) s.Require().NoError(err, "ListActive") @@ -214,8 +218,8 @@ func (s *AccountRepoSuite) TestListActive() { } func (s *AccountRepoSuite) TestListByPlatform() { - mustCreateAccount(s.T(), s.db, &accountModel{Name: "p1", Platform: service.PlatformAnthropic, Status: service.StatusActive}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "p2", Platform: service.PlatformOpenAI, Status: service.StatusActive}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "p1", Platform: service.PlatformAnthropic, Status: service.StatusActive}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "p2", Platform: service.PlatformOpenAI, Status: service.StatusActive}) accounts, err := s.repo.ListByPlatform(s.ctx, service.PlatformAnthropic) s.Require().NoError(err, "ListByPlatform") @@ -226,14 +230,14 @@ func (s *AccountRepoSuite) TestListByPlatform() { // --- Preload and VirtualFields --- func (s *AccountRepoSuite) TestPreload_And_VirtualFields() { - proxy := mustCreateProxy(s.T(), s.db, &proxyModel{Name: 
"p1"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1"}) + proxy := mustCreateProxy(s.T(), s.client, &service.Proxy{Name: "p1"}) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g1"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{ + account := mustCreateAccount(s.T(), s.client, &service.Account{ Name: "acc1", ProxyID: &proxy.ID, }) - mustBindAccountToGroup(s.T(), s.db, account.ID, group.ID, 1) + mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1) got, err := s.repo.GetByID(s.ctx, account.ID) s.Require().NoError(err, "GetByID") @@ -257,9 +261,9 @@ func (s *AccountRepoSuite) TestPreload_And_VirtualFields() { // --- GroupBinding / AddToGroup / RemoveFromGroup / BindGroups / GetGroups --- func (s *AccountRepoSuite) TestGroupBinding_And_BindGroups() { - g1 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1"}) - g2 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc"}) + g1 := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g1"}) + g2 := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc"}) s.Require().NoError(s.repo.AddToGroup(s.ctx, account.ID, g1.ID, 10), "AddToGroup") groups, err := s.repo.GetGroups(s.ctx, account.ID) @@ -279,9 +283,9 @@ func (s *AccountRepoSuite) TestGroupBinding_And_BindGroups() { } func (s *AccountRepoSuite) TestBindGroups_EmptyList() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-empty"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-empty"}) - mustBindAccountToGroup(s.T(), s.db, account.ID, group.ID, 1) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-empty"}) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-empty"}) + mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1) s.Require().NoError(s.repo.BindGroups(s.ctx, account.ID, 
[]int64{}), "BindGroups empty") @@ -294,14 +298,14 @@ func (s *AccountRepoSuite) TestBindGroups_EmptyList() { func (s *AccountRepoSuite) TestListSchedulable() { now := time.Now() - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-sched"}) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sched"}) - okAcc := mustCreateAccount(s.T(), s.db, &accountModel{Name: "ok", Schedulable: true}) - mustBindAccountToGroup(s.T(), s.db, okAcc.ID, group.ID, 1) + okAcc := mustCreateAccount(s.T(), s.client, &service.Account{Name: "ok", Schedulable: true}) + mustBindAccountToGroup(s.T(), s.client, okAcc.ID, group.ID, 1) future := now.Add(10 * time.Minute) - overloaded := mustCreateAccount(s.T(), s.db, &accountModel{Name: "over", Schedulable: true, OverloadUntil: &future}) - mustBindAccountToGroup(s.T(), s.db, overloaded.ID, group.ID, 1) + overloaded := mustCreateAccount(s.T(), s.client, &service.Account{Name: "over", Schedulable: true, OverloadUntil: &future}) + mustBindAccountToGroup(s.T(), s.client, overloaded.ID, group.ID, 1) sched, err := s.repo.ListSchedulable(s.ctx) s.Require().NoError(err, "ListSchedulable") @@ -312,17 +316,17 @@ func (s *AccountRepoSuite) TestListSchedulable() { func (s *AccountRepoSuite) TestListSchedulableByGroupID_TimeBoundaries_And_StatusUpdates() { now := time.Now() - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-sched"}) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sched"}) - okAcc := mustCreateAccount(s.T(), s.db, &accountModel{Name: "ok", Schedulable: true}) - mustBindAccountToGroup(s.T(), s.db, okAcc.ID, group.ID, 1) + okAcc := mustCreateAccount(s.T(), s.client, &service.Account{Name: "ok", Schedulable: true}) + mustBindAccountToGroup(s.T(), s.client, okAcc.ID, group.ID, 1) future := now.Add(10 * time.Minute) - overloaded := mustCreateAccount(s.T(), s.db, &accountModel{Name: "over", Schedulable: true, OverloadUntil: &future}) - mustBindAccountToGroup(s.T(), s.db, overloaded.ID, group.ID, 
1) + overloaded := mustCreateAccount(s.T(), s.client, &service.Account{Name: "over", Schedulable: true, OverloadUntil: &future}) + mustBindAccountToGroup(s.T(), s.client, overloaded.ID, group.ID, 1) - rateLimited := mustCreateAccount(s.T(), s.db, &accountModel{Name: "rl", Schedulable: true}) - mustBindAccountToGroup(s.T(), s.db, rateLimited.ID, group.ID, 1) + rateLimited := mustCreateAccount(s.T(), s.client, &service.Account{Name: "rl", Schedulable: true}) + mustBindAccountToGroup(s.T(), s.client, rateLimited.ID, group.ID, 1) s.Require().NoError(s.repo.SetRateLimited(s.ctx, rateLimited.ID, now.Add(10*time.Minute)), "SetRateLimited") s.Require().NoError(s.repo.SetError(s.ctx, overloaded.ID, "boom"), "SetError") @@ -339,8 +343,8 @@ func (s *AccountRepoSuite) TestListSchedulableByGroupID_TimeBoundaries_And_Statu } func (s *AccountRepoSuite) TestListSchedulableByPlatform() { - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true}) accounts, err := s.repo.ListSchedulableByPlatform(s.ctx, service.PlatformAnthropic) s.Require().NoError(err) @@ -349,11 +353,11 @@ func (s *AccountRepoSuite) TestListSchedulableByPlatform() { } func (s *AccountRepoSuite) TestListSchedulableByGroupIDAndPlatform() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-sp"}) - a1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true}) - a2 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true}) - mustBindAccountToGroup(s.T(), s.db, a1.ID, group.ID, 1) - 
mustBindAccountToGroup(s.T(), s.db, a2.ID, group.ID, 2) + group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sp"}) + a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true}) + a2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true}) + mustBindAccountToGroup(s.T(), s.client, a1.ID, group.ID, 1) + mustBindAccountToGroup(s.T(), s.client, a2.ID, group.ID, 2) accounts, err := s.repo.ListSchedulableByGroupIDAndPlatform(s.ctx, group.ID, service.PlatformAnthropic) s.Require().NoError(err) @@ -362,7 +366,7 @@ func (s *AccountRepoSuite) TestListSchedulableByGroupIDAndPlatform() { } func (s *AccountRepoSuite) TestSetSchedulable() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-sched", Schedulable: true}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-sched", Schedulable: true}) s.Require().NoError(s.repo.SetSchedulable(s.ctx, account.ID, false)) @@ -374,7 +378,7 @@ func (s *AccountRepoSuite) TestSetSchedulable() { // --- SetOverloaded / SetRateLimited / ClearRateLimit --- func (s *AccountRepoSuite) TestSetOverloaded() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-over"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-over"}) until := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC) s.Require().NoError(s.repo.SetOverloaded(s.ctx, account.ID, until)) @@ -386,7 +390,7 @@ func (s *AccountRepoSuite) TestSetOverloaded() { } func (s *AccountRepoSuite) TestSetRateLimited() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-rl"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-rl"}) resetAt := time.Date(2025, 6, 15, 14, 0, 0, 0, time.UTC) s.Require().NoError(s.repo.SetRateLimited(s.ctx, account.ID, resetAt)) @@ -399,7 +403,7 @@ func (s *AccountRepoSuite) TestSetRateLimited() { } func (s 
*AccountRepoSuite) TestClearRateLimit() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-clear"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-clear"}) until := time.Now().Add(1 * time.Hour) s.Require().NoError(s.repo.SetOverloaded(s.ctx, account.ID, until)) s.Require().NoError(s.repo.SetRateLimited(s.ctx, account.ID, until)) @@ -416,7 +420,7 @@ func (s *AccountRepoSuite) TestClearRateLimit() { // --- UpdateLastUsed --- func (s *AccountRepoSuite) TestUpdateLastUsed() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-used"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-used"}) s.Require().Nil(account.LastUsedAt) s.Require().NoError(s.repo.UpdateLastUsed(s.ctx, account.ID)) @@ -429,7 +433,7 @@ func (s *AccountRepoSuite) TestUpdateLastUsed() { // --- SetError --- func (s *AccountRepoSuite) TestSetError() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-err", Status: service.StatusActive}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-err", Status: service.StatusActive}) s.Require().NoError(s.repo.SetError(s.ctx, account.ID, "something went wrong")) @@ -442,7 +446,7 @@ func (s *AccountRepoSuite) TestSetError() { // --- UpdateSessionWindow --- func (s *AccountRepoSuite) TestUpdateSessionWindow() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-win"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-win"}) start := time.Date(2025, 6, 15, 10, 0, 0, 0, time.UTC) end := time.Date(2025, 6, 15, 15, 0, 0, 0, time.UTC) @@ -458,9 +462,9 @@ func (s *AccountRepoSuite) TestUpdateSessionWindow() { // --- UpdateExtra --- func (s *AccountRepoSuite) TestUpdateExtra_MergesFields() { - account := mustCreateAccount(s.T(), s.db, &accountModel{ + account := mustCreateAccount(s.T(), s.client, &service.Account{ Name: "acc-extra", - Extra: datatypes.JSONMap{"a": "1"}, + Extra: 
map[string]any{"a": "1"}, }) s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{"b": "2"}), "UpdateExtra") @@ -471,12 +475,12 @@ func (s *AccountRepoSuite) TestUpdateExtra_MergesFields() { } func (s *AccountRepoSuite) TestUpdateExtra_EmptyUpdates() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-extra-empty"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-extra-empty"}) s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{})) } func (s *AccountRepoSuite) TestUpdateExtra_NilExtra() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-nil-extra", Extra: nil}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-nil-extra", Extra: nil}) s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{"key": "val"})) got, err := s.repo.GetByID(s.ctx, account.ID) @@ -488,9 +492,9 @@ func (s *AccountRepoSuite) TestUpdateExtra_NilExtra() { func (s *AccountRepoSuite) TestGetByCRSAccountID() { crsID := "crs-12345" - mustCreateAccount(s.T(), s.db, &accountModel{ + mustCreateAccount(s.T(), s.client, &service.Account{ Name: "acc-crs", - Extra: datatypes.JSONMap{"crs_account_id": crsID}, + Extra: map[string]any{"crs_account_id": crsID}, }) got, err := s.repo.GetByCRSAccountID(s.ctx, crsID) @@ -514,8 +518,8 @@ func (s *AccountRepoSuite) TestGetByCRSAccountID_EmptyString() { // --- BulkUpdate --- func (s *AccountRepoSuite) TestBulkUpdate() { - a1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "bulk1", Priority: 1}) - a2 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "bulk2", Priority: 1}) + a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk1", Priority: 1}) + a2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk2", Priority: 1}) newPriority := 99 affected, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID, a2.ID}, service.AccountBulkUpdate{ @@ -531,13 +535,13 @@ func (s *AccountRepoSuite) 
TestBulkUpdate() { } func (s *AccountRepoSuite) TestBulkUpdate_MergeCredentials() { - a1 := mustCreateAccount(s.T(), s.db, &accountModel{ + a1 := mustCreateAccount(s.T(), s.client, &service.Account{ Name: "bulk-cred", - Credentials: datatypes.JSONMap{"existing": "value"}, + Credentials: map[string]any{"existing": "value"}, }) _, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{ - Credentials: datatypes.JSONMap{"new_key": "new_value"}, + Credentials: map[string]any{"new_key": "new_value"}, }) s.Require().NoError(err) @@ -547,13 +551,13 @@ func (s *AccountRepoSuite) TestBulkUpdate_MergeCredentials() { } func (s *AccountRepoSuite) TestBulkUpdate_MergeExtra() { - a1 := mustCreateAccount(s.T(), s.db, &accountModel{ + a1 := mustCreateAccount(s.T(), s.client, &service.Account{ Name: "bulk-extra", - Extra: datatypes.JSONMap{"existing": "val"}, + Extra: map[string]any{"existing": "val"}, }) _, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{ - Extra: datatypes.JSONMap{"new_key": "new_val"}, + Extra: map[string]any{"new_key": "new_val"}, }) s.Require().NoError(err) @@ -569,7 +573,7 @@ func (s *AccountRepoSuite) TestBulkUpdate_EmptyIDs() { } func (s *AccountRepoSuite) TestBulkUpdate_EmptyUpdates() { - a1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "bulk-empty"}) + a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk-empty"}) affected, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{}) s.Require().NoError(err) diff --git a/backend/internal/repository/allowed_groups_contract_integration_test.go b/backend/internal/repository/allowed_groups_contract_integration_test.go new file mode 100644 index 00000000..c2aa945c --- /dev/null +++ b/backend/internal/repository/allowed_groups_contract_integration_test.go @@ -0,0 +1,144 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + 
"github.com/stretchr/testify/require" +) + +func uniqueTestValue(t *testing.T, prefix string) string { + t.Helper() + safeName := strings.NewReplacer("/", "_", " ", "_").Replace(t.Name()) + return fmt.Sprintf("%s-%s", prefix, safeName) +} + +func TestUserRepository_RemoveGroupFromAllowedGroups_RemovesAllOccurrences(t *testing.T) { + ctx := context.Background() + entClient, sqlTx := testEntSQLTx(t) + + targetGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "target-group")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + otherGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "other-group")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + + repo := newUserRepositoryWithSQL(entClient, sqlTx) + + u1 := &service.User{ + Email: uniqueTestValue(t, "u1") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID, otherGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u1)) + + u2 := &service.User{ + Email: uniqueTestValue(t, "u2") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u2)) + + u3 := &service.User{ + Email: uniqueTestValue(t, "u3") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{otherGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u3)) + + affected, err := repo.RemoveGroupFromAllowedGroups(ctx, targetGroup.ID) + require.NoError(t, err) + require.Equal(t, int64(2), affected) + + u1After, err := repo.GetByID(ctx, u1.ID) + require.NoError(t, err) + require.NotContains(t, u1After.AllowedGroups, targetGroup.ID) + require.Contains(t, u1After.AllowedGroups, otherGroup.ID) + + u2After, err 
:= repo.GetByID(ctx, u2.ID) + require.NoError(t, err) + require.NotContains(t, u2After.AllowedGroups, targetGroup.ID) +} + +func TestGroupRepository_DeleteCascade_RemovesAllowedGroupsAndClearsApiKeys(t *testing.T) { + ctx := context.Background() + entClient, sqlTx := testEntSQLTx(t) + + targetGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "delete-cascade-target")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + otherGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "delete-cascade-other")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + + userRepo := newUserRepositoryWithSQL(entClient, sqlTx) + groupRepo := newGroupRepositoryWithSQL(entClient, sqlTx) + apiKeyRepo := NewApiKeyRepository(entClient) + + u := &service.User{ + Email: uniqueTestValue(t, "cascade-user") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID, otherGroup.ID}, + } + require.NoError(t, userRepo.Create(ctx, u)) + + key := &service.ApiKey{ + UserID: u.ID, + Key: uniqueTestValue(t, "sk-test-delete-cascade"), + Name: "test key", + GroupID: &targetGroup.ID, + Status: service.StatusActive, + } + require.NoError(t, apiKeyRepo.Create(ctx, key)) + + _, err = groupRepo.DeleteCascade(ctx, targetGroup.ID) + require.NoError(t, err) + + // Deleted group should be hidden by default queries (soft-delete semantics). + _, err = groupRepo.GetByID(ctx, targetGroup.ID) + require.ErrorIs(t, err, service.ErrGroupNotFound) + + activeGroups, err := groupRepo.ListActive(ctx) + require.NoError(t, err) + for _, g := range activeGroups { + require.NotEqual(t, targetGroup.ID, g.ID) + } + + // User.allowed_groups should no longer include the deleted group. 
+ uAfter, err := userRepo.GetByID(ctx, u.ID) + require.NoError(t, err) + require.NotContains(t, uAfter.AllowedGroups, targetGroup.ID) + require.Contains(t, uAfter.AllowedGroups, otherGroup.ID) + + // API keys bound to the deleted group should have group_id cleared. + keyAfter, err := apiKeyRepo.GetByID(ctx, key.ID) + require.NoError(t, err) + require.Nil(t, keyAfter.GroupID) +} + diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index a6001ecc..352cc6e1 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -2,83 +2,118 @@ package repository import ( "context" - "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" - - "gorm.io/gorm" ) type apiKeyRepository struct { - db *gorm.DB + client *dbent.Client } -func NewApiKeyRepository(db *gorm.DB) service.ApiKeyRepository { - return &apiKeyRepository{db: db} +func NewApiKeyRepository(client *dbent.Client) service.ApiKeyRepository { + return &apiKeyRepository{client: client} } func (r *apiKeyRepository) Create(ctx context.Context, key *service.ApiKey) error { - m := apiKeyModelFromService(key) - err := r.db.WithContext(ctx).Create(m).Error + created, err := r.client.ApiKey.Create(). + SetUserID(key.UserID). + SetKey(key.Key). + SetName(key.Name). + SetStatus(key.Status). + SetNillableGroupID(key.GroupID). + Save(ctx) if err == nil { - applyApiKeyModelToService(key, m) + key.ID = created.ID + key.CreatedAt = created.CreatedAt + key.UpdatedAt = created.UpdatedAt } return translatePersistenceError(err, nil, service.ErrApiKeyExists) } func (r *apiKeyRepository) GetByID(ctx context.Context, id int64) (*service.ApiKey, error) { - var m apiKeyModel - err := r.db.WithContext(ctx).Preload("User").Preload("Group").First(&m, id).Error + m, err := r.client.ApiKey.Query(). 
+ Where(apikey.IDEQ(id)). + WithUser(). + WithGroup(). + Only(ctx) if err != nil { - return nil, translatePersistenceError(err, service.ErrApiKeyNotFound, nil) + if dbent.IsNotFound(err) { + return nil, service.ErrApiKeyNotFound + } + return nil, err } - return apiKeyModelToService(&m), nil + return apiKeyEntityToService(m), nil } func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.ApiKey, error) { - var m apiKeyModel - err := r.db.WithContext(ctx).Preload("User").Preload("Group").Where("key = ?", key).First(&m).Error + m, err := r.client.ApiKey.Query(). + Where(apikey.KeyEQ(key)). + WithUser(). + WithGroup(). + Only(ctx) if err != nil { - return nil, translatePersistenceError(err, service.ErrApiKeyNotFound, nil) + if dbent.IsNotFound(err) { + return nil, service.ErrApiKeyNotFound + } + return nil, err } - return apiKeyModelToService(&m), nil + return apiKeyEntityToService(m), nil } func (r *apiKeyRepository) Update(ctx context.Context, key *service.ApiKey) error { - m := apiKeyModelFromService(key) - err := r.db.WithContext(ctx).Model(m).Select("name", "group_id", "status", "updated_at").Updates(m).Error + builder := r.client.ApiKey.UpdateOneID(key.ID). + SetName(key.Name). 
+ SetStatus(key.Status) + if key.GroupID != nil { + builder.SetGroupID(*key.GroupID) + } else { + builder.ClearGroupID() + } + + updated, err := builder.Save(ctx) if err == nil { - applyApiKeyModelToService(key, m) + key.UpdatedAt = updated.UpdatedAt + return nil + } + if dbent.IsNotFound(err) { + return service.ErrApiKeyNotFound } return err } func (r *apiKeyRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&apiKeyModel{}, id).Error + _, err := r.client.ApiKey.Delete().Where(apikey.IDEQ(id)).Exec(ctx) + return err } func (r *apiKeyRepository) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.ApiKey, *pagination.PaginationResult, error) { - var keys []apiKeyModel - var total int64 + q := r.client.ApiKey.Query().Where(apikey.UserIDEQ(userID)) - db := r.db.WithContext(ctx).Model(&apiKeyModel{}).Where("user_id = ?", userID) - - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Preload("Group").Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&keys).Error; err != nil { + keys, err := q. + WithGroup(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(apikey.FieldID)). + All(ctx) + if err != nil { return nil, nil, err } outKeys := make([]service.ApiKey, 0, len(keys)) for i := range keys { - outKeys = append(outKeys, *apiKeyModelToService(&keys[i])) + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) } - return outKeys, paginationResultFromTotal(total, params), nil + return outKeys, paginationResultFromTotal(int64(total), params), nil } func (r *apiKeyRepository) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { @@ -86,11 +121,9 @@ func (r *apiKeyRepository) VerifyOwnership(ctx context.Context, userID int64, ap return []int64{}, nil } - ids := make([]int64, 0, len(apiKeyIDs)) - err := r.db.WithContext(ctx). 
- Model(&apiKeyModel{}). - Where("user_id = ? AND id IN ?", userID, apiKeyIDs). - Pluck("id", &ids).Error + ids, err := r.client.ApiKey.Query(). + Where(apikey.UserIDEQ(userID), apikey.IDIn(apiKeyIDs...)). + IDs(ctx) if err != nil { return nil, err } @@ -98,136 +131,146 @@ func (r *apiKeyRepository) VerifyOwnership(ctx context.Context, userID int64, ap } func (r *apiKeyRepository) CountByUserID(ctx context.Context, userID int64) (int64, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&apiKeyModel{}).Where("user_id = ?", userID).Count(&count).Error - return count, err + count, err := r.client.ApiKey.Query().Where(apikey.UserIDEQ(userID)).Count(ctx) + return int64(count), err } func (r *apiKeyRepository) ExistsByKey(ctx context.Context, key string) (bool, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&apiKeyModel{}).Where("key = ?", key).Count(&count).Error + count, err := r.client.ApiKey.Query().Where(apikey.KeyEQ(key)).Count(ctx) return count > 0, err } func (r *apiKeyRepository) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.ApiKey, *pagination.PaginationResult, error) { - var keys []apiKeyModel - var total int64 + q := r.client.ApiKey.Query().Where(apikey.GroupIDEQ(groupID)) - db := r.db.WithContext(ctx).Model(&apiKeyModel{}).Where("group_id = ?", groupID) - - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Preload("User").Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&keys).Error; err != nil { + keys, err := q. + WithUser(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(apikey.FieldID)). 
+ All(ctx) + if err != nil { return nil, nil, err } outKeys := make([]service.ApiKey, 0, len(keys)) for i := range keys { - outKeys = append(outKeys, *apiKeyModelToService(&keys[i])) + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) } - return outKeys, paginationResultFromTotal(total, params), nil + return outKeys, paginationResultFromTotal(int64(total), params), nil } // SearchApiKeys searches API keys by user ID and/or keyword (name) func (r *apiKeyRepository) SearchApiKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.ApiKey, error) { - var keys []apiKeyModel - - db := r.db.WithContext(ctx).Model(&apiKeyModel{}) - + q := r.client.ApiKey.Query() if userID > 0 { - db = db.Where("user_id = ?", userID) + q = q.Where(apikey.UserIDEQ(userID)) } if keyword != "" { - searchPattern := "%" + keyword + "%" - db = db.Where("name ILIKE ?", searchPattern) + q = q.Where(apikey.NameContainsFold(keyword)) } - if err := db.Limit(limit).Order("id DESC").Find(&keys).Error; err != nil { + keys, err := q.Limit(limit).Order(dbent.Desc(apikey.FieldID)).All(ctx) + if err != nil { return nil, err } outKeys := make([]service.ApiKey, 0, len(keys)) for i := range keys { - outKeys = append(outKeys, *apiKeyModelToService(&keys[i])) + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) } return outKeys, nil } // ClearGroupIDByGroupID 将指定分组的所有 API Key 的 group_id 设为 nil func (r *apiKeyRepository) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { - result := r.db.WithContext(ctx).Model(&apiKeyModel{}). - Where("group_id = ?", groupID). - Update("group_id", nil) - return result.RowsAffected, result.Error + n, err := r.client.ApiKey.Update(). + Where(apikey.GroupIDEQ(groupID)). + ClearGroupID(). 
+ Save(ctx) + return int64(n), err } // CountByGroupID 获取分组的 API Key 数量 func (r *apiKeyRepository) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&apiKeyModel{}).Where("group_id = ?", groupID).Count(&count).Error - return count, err + count, err := r.client.ApiKey.Query().Where(apikey.GroupIDEQ(groupID)).Count(ctx) + return int64(count), err } -type apiKeyModel struct { - ID int64 `gorm:"primaryKey"` - UserID int64 `gorm:"index;not null"` - Key string `gorm:"uniqueIndex;size:128;not null"` - Name string `gorm:"size:100;not null"` - GroupID *int64 `gorm:"index"` - Status string `gorm:"size:20;default:active;not null"` - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - DeletedAt gorm.DeletedAt `gorm:"index"` - - User *userModel `gorm:"foreignKey:UserID"` - Group *groupModel `gorm:"foreignKey:GroupID"` -} - -func (apiKeyModel) TableName() string { return "api_keys" } - -func apiKeyModelToService(m *apiKeyModel) *service.ApiKey { +func apiKeyEntityToService(m *dbent.ApiKey) *service.ApiKey { if m == nil { return nil } - return &service.ApiKey{ + out := &service.ApiKey{ ID: m.ID, UserID: m.UserID, Key: m.Key, Name: m.Name, - GroupID: m.GroupID, Status: m.Status, CreatedAt: m.CreatedAt, UpdatedAt: m.UpdatedAt, - User: userModelToService(m.User), - Group: groupModelToService(m.Group), + GroupID: m.GroupID, } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + return out } -func apiKeyModelFromService(k *service.ApiKey) *apiKeyModel { - if k == nil { +func userEntityToService(u *dbent.User) *service.User { + if u == nil { return nil } - return &apiKeyModel{ - ID: k.ID, - UserID: k.UserID, - Key: k.Key, - Name: k.Name, - GroupID: k.GroupID, - Status: k.Status, - CreatedAt: k.CreatedAt, - UpdatedAt: k.UpdatedAt, + return &service.User{ + ID: u.ID, + Email: 
u.Email, + Username: u.Username, + Wechat: u.Wechat, + Notes: u.Notes, + PasswordHash: u.PasswordHash, + Role: u.Role, + Balance: u.Balance, + Concurrency: u.Concurrency, + Status: u.Status, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, } } -func applyApiKeyModelToService(key *service.ApiKey, m *apiKeyModel) { - if key == nil || m == nil { - return +func groupEntityToService(g *dbent.Group) *service.Group { + if g == nil { + return nil + } + return &service.Group{ + ID: g.ID, + Name: g.Name, + Description: derefString(g.Description), + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUsd, + WeeklyLimitUSD: g.WeeklyLimitUsd, + MonthlyLimitUSD: g.MonthlyLimitUsd, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, } - key.ID = m.ID - key.CreatedAt = m.CreatedAt - key.UpdatedAt = m.UpdatedAt +} + +func derefString(s *string) string { + if s == nil { + return "" + } + return *s } diff --git a/backend/internal/repository/api_key_repo_integration_test.go b/backend/internal/repository/api_key_repo_integration_test.go index 384ee364..0916fcc5 100644 --- a/backend/internal/repository/api_key_repo_integration_test.go +++ b/backend/internal/repository/api_key_repo_integration_test.go @@ -6,23 +6,24 @@ import ( "context" "testing" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type ApiKeyRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *apiKeyRepository + ctx context.Context + client *dbent.Client + repo *apiKeyRepository } func (s *ApiKeyRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewApiKeyRepository(s.db).(*apiKeyRepository) + entClient, _ := testEntSQLTx(s.T()) + s.client = entClient + s.repo = 
NewApiKeyRepository(entClient).(*apiKeyRepository) } func TestApiKeyRepoSuite(t *testing.T) { @@ -32,7 +33,7 @@ func TestApiKeyRepoSuite(t *testing.T) { // --- Create / GetByID / GetByKey --- func (s *ApiKeyRepoSuite) TestCreate() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "create@test.com"}) + user := s.mustCreateUser("create@test.com") key := &service.ApiKey{ UserID: user.ID, @@ -56,16 +57,17 @@ func (s *ApiKeyRepoSuite) TestGetByID_NotFound() { } func (s *ApiKeyRepoSuite) TestGetByKey() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "getbykey@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-key"}) + user := s.mustCreateUser("getbykey@test.com") + group := s.mustCreateGroup("g-key") - key := mustCreateApiKey(s.T(), s.db, &apiKeyModel{ + key := &service.ApiKey{ UserID: user.ID, Key: "sk-getbykey", Name: "My Key", GroupID: &group.ID, Status: service.StatusActive, - }) + } + s.Require().NoError(s.repo.Create(s.ctx, key)) got, err := s.repo.GetByKey(s.ctx, key.Key) s.Require().NoError(err, "GetByKey") @@ -84,13 +86,14 @@ func (s *ApiKeyRepoSuite) TestGetByKey_NotFound() { // --- Update --- func (s *ApiKeyRepoSuite) TestUpdate() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "update@test.com"}) - key := apiKeyModelToService(mustCreateApiKey(s.T(), s.db, &apiKeyModel{ + user := s.mustCreateUser("update@test.com") + key := &service.ApiKey{ UserID: user.ID, Key: "sk-update", Name: "Original", Status: service.StatusActive, - })) + } + s.Require().NoError(s.repo.Create(s.ctx, key)) key.Name = "Renamed" key.Status = service.StatusDisabled @@ -106,14 +109,16 @@ func (s *ApiKeyRepoSuite) TestUpdate() { } func (s *ApiKeyRepoSuite) TestUpdate_ClearGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "cleargroup@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-clear"}) - key := apiKeyModelToService(mustCreateApiKey(s.T(), s.db, &apiKeyModel{ + user := 
s.mustCreateUser("cleargroup@test.com") + group := s.mustCreateGroup("g-clear") + key := &service.ApiKey{ UserID: user.ID, Key: "sk-clear-group", Name: "Group Key", GroupID: &group.ID, - })) + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) key.GroupID = nil err := s.repo.Update(s.ctx, key) @@ -127,12 +132,14 @@ func (s *ApiKeyRepoSuite) TestUpdate_ClearGroupID() { // --- Delete --- func (s *ApiKeyRepoSuite) TestDelete() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "delete@test.com"}) - key := mustCreateApiKey(s.T(), s.db, &apiKeyModel{ + user := s.mustCreateUser("delete@test.com") + key := &service.ApiKey{ UserID: user.ID, Key: "sk-delete", Name: "Delete Me", - }) + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) err := s.repo.Delete(s.ctx, key.ID) s.Require().NoError(err, "Delete") @@ -144,9 +151,9 @@ func (s *ApiKeyRepoSuite) TestDelete() { // --- ListByUserID / CountByUserID --- func (s *ApiKeyRepoSuite) TestListByUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listbyuser@test.com"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-list-1", Name: "Key 1"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-list-2", Name: "Key 2"}) + user := s.mustCreateUser("listbyuser@test.com") + s.mustCreateApiKey(user.ID, "sk-list-1", "Key 1", nil) + s.mustCreateApiKey(user.ID, "sk-list-2", "Key 2", nil) keys, page, err := s.repo.ListByUserID(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "ListByUserID") @@ -155,13 +162,9 @@ func (s *ApiKeyRepoSuite) TestListByUserID() { } func (s *ApiKeyRepoSuite) TestListByUserID_Pagination() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "paging@test.com"}) + user := s.mustCreateUser("paging@test.com") for i := 0; i < 5; i++ { - mustCreateApiKey(s.T(), s.db, &apiKeyModel{ - UserID: user.ID, - Key: "sk-page-" + string(rune('a'+i)), - Name: 
"Key", - }) + s.mustCreateApiKey(user.ID, "sk-page-"+string(rune('a'+i)), "Key", nil) } keys, page, err := s.repo.ListByUserID(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 2}) @@ -172,9 +175,9 @@ func (s *ApiKeyRepoSuite) TestListByUserID_Pagination() { } func (s *ApiKeyRepoSuite) TestCountByUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "count@test.com"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-count-1", Name: "K1"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-count-2", Name: "K2"}) + user := s.mustCreateUser("count@test.com") + s.mustCreateApiKey(user.ID, "sk-count-1", "K1", nil) + s.mustCreateApiKey(user.ID, "sk-count-2", "K2", nil) count, err := s.repo.CountByUserID(s.ctx, user.ID) s.Require().NoError(err, "CountByUserID") @@ -184,12 +187,12 @@ func (s *ApiKeyRepoSuite) TestCountByUserID() { // --- ListByGroupID / CountByGroupID --- func (s *ApiKeyRepoSuite) TestListByGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listbygroup@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-list"}) + user := s.mustCreateUser("listbygroup@test.com") + group := s.mustCreateGroup("g-list") - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-grp-1", Name: "K1", GroupID: &group.ID}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-grp-2", Name: "K2", GroupID: &group.ID}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-grp-3", Name: "K3"}) // no group + s.mustCreateApiKey(user.ID, "sk-grp-1", "K1", &group.ID) + s.mustCreateApiKey(user.ID, "sk-grp-2", "K2", &group.ID) + s.mustCreateApiKey(user.ID, "sk-grp-3", "K3", nil) // no group keys, page, err := s.repo.ListByGroupID(s.ctx, group.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "ListByGroupID") @@ -200,10 +203,9 @@ func (s *ApiKeyRepoSuite) TestListByGroupID() { } func (s *ApiKeyRepoSuite) 
TestCountByGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "countgroup@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-count"}) - - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-gc-1", Name: "K1", GroupID: &group.ID}) + user := s.mustCreateUser("countgroup@test.com") + group := s.mustCreateGroup("g-count") + s.mustCreateApiKey(user.ID, "sk-gc-1", "K1", &group.ID) count, err := s.repo.CountByGroupID(s.ctx, group.ID) s.Require().NoError(err, "CountByGroupID") @@ -213,8 +215,8 @@ func (s *ApiKeyRepoSuite) TestCountByGroupID() { // --- ExistsByKey --- func (s *ApiKeyRepoSuite) TestExistsByKey() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "exists@test.com"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-exists", Name: "K"}) + user := s.mustCreateUser("exists@test.com") + s.mustCreateApiKey(user.ID, "sk-exists", "K", nil) exists, err := s.repo.ExistsByKey(s.ctx, "sk-exists") s.Require().NoError(err, "ExistsByKey") @@ -228,9 +230,9 @@ func (s *ApiKeyRepoSuite) TestExistsByKey() { // --- SearchApiKeys --- func (s *ApiKeyRepoSuite) TestSearchApiKeys() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "search@test.com"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-search-1", Name: "Production Key"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-search-2", Name: "Development Key"}) + user := s.mustCreateUser("search@test.com") + s.mustCreateApiKey(user.ID, "sk-search-1", "Production Key", nil) + s.mustCreateApiKey(user.ID, "sk-search-2", "Development Key", nil) found, err := s.repo.SearchApiKeys(s.ctx, user.ID, "prod", 10) s.Require().NoError(err, "SearchApiKeys") @@ -239,9 +241,9 @@ func (s *ApiKeyRepoSuite) TestSearchApiKeys() { } func (s *ApiKeyRepoSuite) TestSearchApiKeys_NoKeyword() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "searchnokw@test.com"}) - mustCreateApiKey(s.T(), s.db, 
&apiKeyModel{UserID: user.ID, Key: "sk-nk-1", Name: "K1"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-nk-2", Name: "K2"}) + user := s.mustCreateUser("searchnokw@test.com") + s.mustCreateApiKey(user.ID, "sk-nk-1", "K1", nil) + s.mustCreateApiKey(user.ID, "sk-nk-2", "K2", nil) found, err := s.repo.SearchApiKeys(s.ctx, user.ID, "", 10) s.Require().NoError(err) @@ -249,8 +251,8 @@ func (s *ApiKeyRepoSuite) TestSearchApiKeys_NoKeyword() { } func (s *ApiKeyRepoSuite) TestSearchApiKeys_NoUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "searchnouid@test.com"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-nu-1", Name: "TestKey"}) + user := s.mustCreateUser("searchnouid@test.com") + s.mustCreateApiKey(user.ID, "sk-nu-1", "TestKey", nil) found, err := s.repo.SearchApiKeys(s.ctx, 0, "testkey", 10) s.Require().NoError(err) @@ -260,12 +262,12 @@ func (s *ApiKeyRepoSuite) TestSearchApiKeys_NoUserID() { // --- ClearGroupIDByGroupID --- func (s *ApiKeyRepoSuite) TestClearGroupIDByGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "cleargrp@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-clear-bulk"}) + user := s.mustCreateUser("cleargrp@test.com") + group := s.mustCreateGroup("g-clear-bulk") - k1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-clr-1", Name: "K1", GroupID: &group.ID}) - k2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-clr-2", Name: "K2", GroupID: &group.ID}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-clr-3", Name: "K3"}) // no group + k1 := s.mustCreateApiKey(user.ID, "sk-clr-1", "K1", &group.ID) + k2 := s.mustCreateApiKey(user.ID, "sk-clr-2", "K2", &group.ID) + s.mustCreateApiKey(user.ID, "sk-clr-3", "K3", nil) // no group affected, err := s.repo.ClearGroupIDByGroupID(s.ctx, group.ID) s.Require().NoError(err, "ClearGroupIDByGroupID") @@ -283,16 +285,10 @@ func (s 
*ApiKeyRepoSuite) TestClearGroupIDByGroupID() { // --- Combined CRUD/Search/ClearGroupID (original test preserved as integration) --- func (s *ApiKeyRepoSuite) TestCRUD_Search_ClearGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "k@example.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-k"}) - - key := apiKeyModelToService(mustCreateApiKey(s.T(), s.db, &apiKeyModel{ - UserID: user.ID, - Key: "sk-test-1", - Name: "My Key", - GroupID: &group.ID, - Status: service.StatusActive, - })) + user := s.mustCreateUser("k@example.com") + group := s.mustCreateGroup("g-k") + key := s.mustCreateApiKey(user.ID, "sk-test-1", "My Key", &group.ID) + key.GroupID = &group.ID got, err := s.repo.GetByKey(s.ctx, key.Key) s.Require().NoError(err, "GetByKey") @@ -330,12 +326,8 @@ func (s *ApiKeyRepoSuite) TestCRUD_Search_ClearGroupID() { s.Require().Equal(key.ID, found[0].ID) // ClearGroupIDByGroupID - k2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{ - UserID: user.ID, - Key: "sk-test-2", - Name: "Group Key", - GroupID: &group.ID, - }) + k2 := s.mustCreateApiKey(user.ID, "sk-test-2", "Group Key", &group.ID) + k2.GroupID = &group.ID countBefore, err := s.repo.CountByGroupID(s.ctx, group.ID) s.Require().NoError(err, "CountByGroupID") @@ -353,3 +345,41 @@ func (s *ApiKeyRepoSuite) TestCRUD_Search_ClearGroupID() { s.Require().NoError(err, "CountByGroupID after clear") s.Require().Equal(int64(0), countAfter, "expected 0 keys in group after clear") } + +func (s *ApiKeyRepoSuite) mustCreateUser(email string) *service.User { + s.T().Helper() + + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + SetStatus(service.StatusActive). + SetRole(service.RoleUser). + Save(s.ctx) + s.Require().NoError(err, "create user") + return userEntityToService(u) +} + +func (s *ApiKeyRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). 
+ SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *ApiKeyRepoSuite) mustCreateApiKey(userID int64, key, name string, groupID *int64) *service.ApiKey { + s.T().Helper() + + k := &service.ApiKey{ + UserID: userID, + Key: key, + Name: name, + GroupID: groupID, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, k), "create api key") + return k +} diff --git a/backend/internal/repository/auto_migrate.go b/backend/internal/repository/auto_migrate.go deleted file mode 100644 index 9127eeb9..00000000 --- a/backend/internal/repository/auto_migrate.go +++ /dev/null @@ -1,49 +0,0 @@ -package repository - -import ( - "log" - "time" - - "gorm.io/gorm" -) - -// MaxExpiresAt is the maximum allowed expiration date for subscriptions (year 2099) -// This prevents time.Time JSON serialization errors (RFC 3339 requires year <= 9999) -var maxExpiresAt = time.Date(2099, 12, 31, 23, 59, 59, 0, time.UTC) - -// AutoMigrate runs schema migrations for all repository persistence models. -// Persistence models are defined within individual `*_repo.go` files. -func AutoMigrate(db *gorm.DB) error { - err := db.AutoMigrate( - &userModel{}, - &apiKeyModel{}, - &groupModel{}, - &accountModel{}, - &accountGroupModel{}, - &proxyModel{}, - &redeemCodeModel{}, - &usageLogModel{}, - &settingModel{}, - &userSubscriptionModel{}, - ) - if err != nil { - return err - } - - // 修复无效的过期时间(年份超过 2099 会导致 JSON 序列化失败) - return fixInvalidExpiresAt(db) -} - -// fixInvalidExpiresAt 修复 user_subscriptions 表中无效的过期时间 -func fixInvalidExpiresAt(db *gorm.DB) error { - result := db.Model(&userSubscriptionModel{}). - Where("expires_at > ?", maxExpiresAt). 
- Update("expires_at", maxExpiresAt) - if result.Error != nil { - return result.Error - } - if result.RowsAffected > 0 { - log.Printf("[AutoMigrate] Fixed %d subscriptions with invalid expires_at (year > 2099)", result.RowsAffected) - } - return nil -} diff --git a/backend/internal/repository/error_translate.go b/backend/internal/repository/error_translate.go index c70af510..68348830 100644 --- a/backend/internal/repository/error_translate.go +++ b/backend/internal/repository/error_translate.go @@ -1,38 +1,75 @@ package repository import ( + "database/sql" "errors" "strings" + dbent "github.com/Wei-Shaw/sub2api/ent" infraerrors "github.com/Wei-Shaw/sub2api/internal/infrastructure/errors" - "gorm.io/gorm" + "github.com/lib/pq" ) +// translatePersistenceError 将数据库层错误翻译为业务层错误。 +// +// 这是 Repository 层的核心错误处理函数,确保数据库细节不会泄露到业务层。 +// 通过统一的错误翻译,业务层可以使用语义明确的错误类型(如 ErrUserNotFound) +// 而不是依赖于特定数据库的错误(如 sql.ErrNoRows)。 +// +// 参数: +// - err: 原始数据库错误 +// - notFound: 当记录不存在时返回的业务错误(可为 nil 表示不处理) +// - conflict: 当违反唯一约束时返回的业务错误(可为 nil 表示不处理) +// +// 返回: +// - 翻译后的业务错误,或原始错误(如果不匹配任何规则) +// +// 示例: +// +// err := translatePersistenceError(dbErr, service.ErrUserNotFound, service.ErrEmailExists) func translatePersistenceError(err error, notFound, conflict *infraerrors.ApplicationError) error { if err == nil { return nil } - if notFound != nil && errors.Is(err, gorm.ErrRecordNotFound) { + // 兼容 Ent ORM 和标准 database/sql 的 NotFound 行为。 + // Ent 使用自定义的 NotFoundError,而标准库使用 sql.ErrNoRows。 + // 这里同时处理两种情况,保持业务错误映射一致。 + if notFound != nil && (errors.Is(err, sql.ErrNoRows) || dbent.IsNotFound(err)) { return notFound.WithCause(err) } + // 处理唯一约束冲突(如邮箱已存在、名称重复等) if conflict != nil && isUniqueConstraintViolation(err) { return conflict.WithCause(err) } + // 未匹配任何规则,返回原始错误 return err } +// isUniqueConstraintViolation 判断错误是否为唯一约束冲突。 +// +// 支持多种检测方式: +// 1. PostgreSQL 特定错误码 23505(唯一约束冲突) +// 2. 
错误消息中包含的通用关键词 +// +// 这种多层次的检测确保了对不同数据库驱动和 ORM 的兼容性。 func isUniqueConstraintViolation(err error) bool { if err == nil { return false } - if errors.Is(err, gorm.ErrDuplicatedKey) { - return true + // 优先检测 PostgreSQL 特定错误码(最精确)。 + // 错误码 23505 对应 unique_violation。 + // 参考:https://www.postgresql.org/docs/current/errcodes-appendix.html + var pgErr *pq.Error + if errors.As(err, &pgErr) { + return pgErr.Code == "23505" } + // 回退到错误消息检测(兼容其他场景)。 + // 这些关键词覆盖了 PostgreSQL、MySQL 等主流数据库的错误消息。 msg := strings.ToLower(err.Error()) return strings.Contains(msg, "duplicate key") || strings.Contains(msg, "unique constraint") || diff --git a/backend/internal/repository/fixtures_integration_test.go b/backend/internal/repository/fixtures_integration_test.go index 72c5c0d5..253f24f0 100644 --- a/backend/internal/repository/fixtures_integration_test.go +++ b/backend/internal/repository/fixtures_integration_test.go @@ -3,17 +3,22 @@ package repository import ( + "context" "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/require" - "gorm.io/datatypes" - "gorm.io/gorm" ) -func mustCreateUser(t *testing.T, db *gorm.DB, u *userModel) *userModel { +func mustCreateUser(t *testing.T, client *dbent.Client, u *service.User) *service.User { t.Helper() + ctx := context.Background() + + if u.Email == "" { + u.Email = "user-" + time.Now().Format(time.RFC3339Nano) + "@example.com" + } if u.PasswordHash == "" { u.PasswordHash = "test-password-hash" } @@ -26,18 +31,48 @@ func mustCreateUser(t *testing.T, db *gorm.DB, u *userModel) *userModel { if u.Concurrency == 0 { u.Concurrency = 5 } - if u.CreatedAt.IsZero() { - u.CreatedAt = time.Now() + + create := client.User.Create(). + SetEmail(u.Email). + SetPasswordHash(u.PasswordHash). + SetRole(u.Role). + SetStatus(u.Status). + SetBalance(u.Balance). + SetConcurrency(u.Concurrency). + SetUsername(u.Username). + SetWechat(u.Wechat). 
+ SetNotes(u.Notes) + if !u.CreatedAt.IsZero() { + create.SetCreatedAt(u.CreatedAt) } - if u.UpdatedAt.IsZero() { - u.UpdatedAt = u.CreatedAt + if !u.UpdatedAt.IsZero() { + create.SetUpdatedAt(u.UpdatedAt) } - require.NoError(t, db.Create(u).Error, "create user") + + created, err := create.Save(ctx) + require.NoError(t, err, "create user") + + u.ID = created.ID + u.CreatedAt = created.CreatedAt + u.UpdatedAt = created.UpdatedAt + + if len(u.AllowedGroups) > 0 { + for _, groupID := range u.AllowedGroups { + _, err := client.UserAllowedGroup.Create(). + SetUserID(u.ID). + SetGroupID(groupID). + Save(ctx) + require.NoError(t, err, "create user_allowed_groups row") + } + } + return u } -func mustCreateGroup(t *testing.T, db *gorm.DB, g *groupModel) *groupModel { +func mustCreateGroup(t *testing.T, client *dbent.Client, g *service.Group) *service.Group { t.Helper() + ctx := context.Background() + if g.Platform == "" { g.Platform = service.PlatformAnthropic } @@ -47,18 +82,46 @@ func mustCreateGroup(t *testing.T, db *gorm.DB, g *groupModel) *groupModel { if g.SubscriptionType == "" { g.SubscriptionType = service.SubscriptionTypeStandard } - if g.CreatedAt.IsZero() { - g.CreatedAt = time.Now() + + create := client.Group.Create(). + SetName(g.Name). + SetPlatform(g.Platform). + SetStatus(g.Status). + SetSubscriptionType(g.SubscriptionType). + SetRateMultiplier(g.RateMultiplier). 
+ SetIsExclusive(g.IsExclusive) + if g.Description != "" { + create.SetDescription(g.Description) } - if g.UpdatedAt.IsZero() { - g.UpdatedAt = g.CreatedAt + if g.DailyLimitUSD != nil { + create.SetDailyLimitUsd(*g.DailyLimitUSD) } - require.NoError(t, db.Create(g).Error, "create group") + if g.WeeklyLimitUSD != nil { + create.SetWeeklyLimitUsd(*g.WeeklyLimitUSD) + } + if g.MonthlyLimitUSD != nil { + create.SetMonthlyLimitUsd(*g.MonthlyLimitUSD) + } + if !g.CreatedAt.IsZero() { + create.SetCreatedAt(g.CreatedAt) + } + if !g.UpdatedAt.IsZero() { + create.SetUpdatedAt(g.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create group") + + g.ID = created.ID + g.CreatedAt = created.CreatedAt + g.UpdatedAt = created.UpdatedAt return g } -func mustCreateProxy(t *testing.T, db *gorm.DB, p *proxyModel) *proxyModel { +func mustCreateProxy(t *testing.T, client *dbent.Client, p *service.Proxy) *service.Proxy { t.Helper() + ctx := context.Background() + if p.Protocol == "" { p.Protocol = "http" } @@ -71,18 +134,39 @@ func mustCreateProxy(t *testing.T, db *gorm.DB, p *proxyModel) *proxyModel { if p.Status == "" { p.Status = service.StatusActive } - if p.CreatedAt.IsZero() { - p.CreatedAt = time.Now() + + create := client.Proxy.Create(). + SetName(p.Name). + SetProtocol(p.Protocol). + SetHost(p.Host). + SetPort(p.Port). 
+ SetStatus(p.Status) + if p.Username != "" { + create.SetUsername(p.Username) } - if p.UpdatedAt.IsZero() { - p.UpdatedAt = p.CreatedAt + if p.Password != "" { + create.SetPassword(p.Password) } - require.NoError(t, db.Create(p).Error, "create proxy") + if !p.CreatedAt.IsZero() { + create.SetCreatedAt(p.CreatedAt) + } + if !p.UpdatedAt.IsZero() { + create.SetUpdatedAt(p.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create proxy") + + p.ID = created.ID + p.CreatedAt = created.CreatedAt + p.UpdatedAt = created.UpdatedAt return p } -func mustCreateAccount(t *testing.T, db *gorm.DB, a *accountModel) *accountModel { +func mustCreateAccount(t *testing.T, client *dbent.Client, a *service.Account) *service.Account { t.Helper() + ctx := context.Background() + if a.Platform == "" { a.Platform = service.PlatformAnthropic } @@ -92,57 +176,158 @@ func mustCreateAccount(t *testing.T, db *gorm.DB, a *accountModel) *accountModel if a.Status == "" { a.Status = service.StatusActive } + if a.Concurrency == 0 { + a.Concurrency = 3 + } + if a.Priority == 0 { + a.Priority = 50 + } if !a.Schedulable { a.Schedulable = true } if a.Credentials == nil { - a.Credentials = datatypes.JSONMap{} + a.Credentials = map[string]any{} } if a.Extra == nil { - a.Extra = datatypes.JSONMap{} + a.Extra = map[string]any{} } - if a.CreatedAt.IsZero() { - a.CreatedAt = time.Now() + + create := client.Account.Create(). + SetName(a.Name). + SetPlatform(a.Platform). + SetType(a.Type). + SetCredentials(a.Credentials). + SetExtra(a.Extra). + SetConcurrency(a.Concurrency). + SetPriority(a.Priority). + SetStatus(a.Status). + SetSchedulable(a.Schedulable). 
+ SetErrorMessage(a.ErrorMessage) + + if a.ProxyID != nil { + create.SetProxyID(*a.ProxyID) } - if a.UpdatedAt.IsZero() { - a.UpdatedAt = a.CreatedAt + if a.LastUsedAt != nil { + create.SetLastUsedAt(*a.LastUsedAt) } - require.NoError(t, db.Create(a).Error, "create account") + if a.RateLimitedAt != nil { + create.SetRateLimitedAt(*a.RateLimitedAt) + } + if a.RateLimitResetAt != nil { + create.SetRateLimitResetAt(*a.RateLimitResetAt) + } + if a.OverloadUntil != nil { + create.SetOverloadUntil(*a.OverloadUntil) + } + if a.SessionWindowStart != nil { + create.SetSessionWindowStart(*a.SessionWindowStart) + } + if a.SessionWindowEnd != nil { + create.SetSessionWindowEnd(*a.SessionWindowEnd) + } + if a.SessionWindowStatus != "" { + create.SetSessionWindowStatus(a.SessionWindowStatus) + } + if !a.CreatedAt.IsZero() { + create.SetCreatedAt(a.CreatedAt) + } + if !a.UpdatedAt.IsZero() { + create.SetUpdatedAt(a.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create account") + + a.ID = created.ID + a.CreatedAt = created.CreatedAt + a.UpdatedAt = created.UpdatedAt return a } -func mustCreateApiKey(t *testing.T, db *gorm.DB, k *apiKeyModel) *apiKeyModel { +func mustCreateApiKey(t *testing.T, client *dbent.Client, k *service.ApiKey) *service.ApiKey { t.Helper() + ctx := context.Background() + if k.Status == "" { k.Status = service.StatusActive } - if k.CreatedAt.IsZero() { - k.CreatedAt = time.Now() + if k.Key == "" { + k.Key = "sk-" + time.Now().Format("150405.000000") } - if k.UpdatedAt.IsZero() { - k.UpdatedAt = k.CreatedAt + if k.Name == "" { + k.Name = "default" } - require.NoError(t, db.Create(k).Error, "create api key") + + create := client.ApiKey.Create(). + SetUserID(k.UserID). + SetKey(k.Key). + SetName(k.Name). 
+ SetStatus(k.Status) + if k.GroupID != nil { + create.SetGroupID(*k.GroupID) + } + if !k.CreatedAt.IsZero() { + create.SetCreatedAt(k.CreatedAt) + } + if !k.UpdatedAt.IsZero() { + create.SetUpdatedAt(k.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create api key") + + k.ID = created.ID + k.CreatedAt = created.CreatedAt + k.UpdatedAt = created.UpdatedAt return k } -func mustCreateRedeemCode(t *testing.T, db *gorm.DB, c *redeemCodeModel) *redeemCodeModel { +func mustCreateRedeemCode(t *testing.T, client *dbent.Client, c *service.RedeemCode) *service.RedeemCode { t.Helper() + ctx := context.Background() + if c.Status == "" { c.Status = service.StatusUnused } if c.Type == "" { c.Type = service.RedeemTypeBalance } - if c.CreatedAt.IsZero() { - c.CreatedAt = time.Now() + if c.Code == "" { + c.Code = "rc-" + time.Now().Format("150405.000000") } - require.NoError(t, db.Create(c).Error, "create redeem code") + + create := client.RedeemCode.Create(). + SetCode(c.Code). + SetType(c.Type). + SetValue(c.Value). + SetStatus(c.Status). + SetNotes(c.Notes). 
+ SetValidityDays(c.ValidityDays) + if c.UsedBy != nil { + create.SetUsedBy(*c.UsedBy) + } + if c.UsedAt != nil { + create.SetUsedAt(*c.UsedAt) + } + if c.GroupID != nil { + create.SetGroupID(*c.GroupID) + } + if !c.CreatedAt.IsZero() { + create.SetCreatedAt(c.CreatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create redeem code") + + c.ID = created.ID + c.CreatedAt = created.CreatedAt return c } -func mustCreateSubscription(t *testing.T, db *gorm.DB, s *userSubscriptionModel) *userSubscriptionModel { +func mustCreateSubscription(t *testing.T, client *dbent.Client, s *service.UserSubscription) *service.UserSubscription { t.Helper() + ctx := context.Background() + if s.Status == "" { s.Status = service.SubscriptionStatusActive } @@ -162,16 +347,47 @@ func mustCreateSubscription(t *testing.T, db *gorm.DB, s *userSubscriptionModel) if s.UpdatedAt.IsZero() { s.UpdatedAt = now } - require.NoError(t, db.Create(s).Error, "create user subscription") + + create := client.UserSubscription.Create(). + SetUserID(s.UserID). + SetGroupID(s.GroupID). + SetStartsAt(s.StartsAt). + SetExpiresAt(s.ExpiresAt). + SetStatus(s.Status). + SetAssignedAt(s.AssignedAt). + SetNotes(s.Notes). + SetDailyUsageUsd(s.DailyUsageUSD). + SetWeeklyUsageUsd(s.WeeklyUsageUSD). 
+ SetMonthlyUsageUsd(s.MonthlyUsageUSD) + + if s.AssignedBy != nil { + create.SetAssignedBy(*s.AssignedBy) + } + if !s.CreatedAt.IsZero() { + create.SetCreatedAt(s.CreatedAt) + } + if !s.UpdatedAt.IsZero() { + create.SetUpdatedAt(s.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create user subscription") + + s.ID = created.ID + s.CreatedAt = created.CreatedAt + s.UpdatedAt = created.UpdatedAt return s } -func mustBindAccountToGroup(t *testing.T, db *gorm.DB, accountID, groupID int64, priority int) { +func mustBindAccountToGroup(t *testing.T, client *dbent.Client, accountID, groupID int64, priority int) { t.Helper() - require.NoError(t, db.Create(&accountGroupModel{ - AccountID: accountID, - GroupID: groupID, - Priority: priority, - CreatedAt: time.Now(), - }).Error, "create account_group") + ctx := context.Background() + + _, err := client.AccountGroup.Create(). + SetAccountID(accountID). + SetGroupID(groupID). + SetPriority(priority). + Save(ctx) + require.NoError(t, err, "create account_group") } + diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 688d2655..5b9563a2 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -2,280 +2,370 @@ package repository import ( "context" - "time" - - "github.com/Wei-Shaw/sub2api/internal/service" + "database/sql" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" - "gorm.io/gorm" - "gorm.io/gorm/clause" + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" ) +type sqlExecutor interface { + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) + QueryRowContext(ctx context.Context, 
query string, args ...any) *sql.Row +} + +type sqlBeginner interface { + sqlExecutor + BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) +} + type groupRepository struct { - db *gorm.DB + client *dbent.Client + sql sqlExecutor + begin sqlBeginner } -func NewGroupRepository(db *gorm.DB) service.GroupRepository { - return &groupRepository{db: db} +func NewGroupRepository(client *dbent.Client, sqlDB *sql.DB) service.GroupRepository { + return newGroupRepositoryWithSQL(client, sqlDB) } -func (r *groupRepository) Create(ctx context.Context, group *service.Group) error { - m := groupModelFromService(group) - err := r.db.WithContext(ctx).Create(m).Error +func newGroupRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *groupRepository { + var beginner sqlBeginner + if b, ok := sqlq.(sqlBeginner); ok { + beginner = b + } + return &groupRepository{client: client, sql: sqlq, begin: beginner} +} + +func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) error { + builder := r.client.Group.Create(). + SetName(groupIn.Name). + SetDescription(groupIn.Description). + SetPlatform(groupIn.Platform). + SetRateMultiplier(groupIn.RateMultiplier). + SetIsExclusive(groupIn.IsExclusive). + SetStatus(groupIn.Status). + SetSubscriptionType(groupIn.SubscriptionType). + SetNillableDailyLimitUsd(groupIn.DailyLimitUSD). + SetNillableWeeklyLimitUsd(groupIn.WeeklyLimitUSD). + SetNillableMonthlyLimitUsd(groupIn.MonthlyLimitUSD) + + created, err := builder.Save(ctx) if err == nil { - applyGroupModelToService(group, m) + groupIn.ID = created.ID + groupIn.CreatedAt = created.CreatedAt + groupIn.UpdatedAt = created.UpdatedAt } return translatePersistenceError(err, nil, service.ErrGroupExists) } func (r *groupRepository) GetByID(ctx context.Context, id int64) (*service.Group, error) { - var m groupModel - err := r.db.WithContext(ctx).First(&m, id).Error + m, err := r.client.Group.Query(). + Where(group.IDEQ(id)). 
+ Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil) } - group := groupModelToService(&m) - count, _ := r.GetAccountCount(ctx, group.ID) - group.AccountCount = count - return group, nil + + out := groupEntityToService(m) + count, _ := r.GetAccountCount(ctx, out.ID) + out.AccountCount = count + return out, nil } -func (r *groupRepository) Update(ctx context.Context, group *service.Group) error { - m := groupModelFromService(group) - err := r.db.WithContext(ctx).Save(m).Error - if err == nil { - applyGroupModelToService(group, m) +func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) error { + updated, err := r.client.Group.UpdateOneID(groupIn.ID). + SetName(groupIn.Name). + SetDescription(groupIn.Description). + SetPlatform(groupIn.Platform). + SetRateMultiplier(groupIn.RateMultiplier). + SetIsExclusive(groupIn.IsExclusive). + SetStatus(groupIn.Status). + SetSubscriptionType(groupIn.SubscriptionType). + SetNillableDailyLimitUsd(groupIn.DailyLimitUSD). + SetNillableWeeklyLimitUsd(groupIn.WeeklyLimitUSD). + SetNillableMonthlyLimitUsd(groupIn.MonthlyLimitUSD). 
+ Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrGroupNotFound, service.ErrGroupExists) } - return err + groupIn.UpdatedAt = updated.UpdatedAt + return nil } func (r *groupRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&groupModel{}, id).Error + _, err := r.client.Group.Delete().Where(group.IDEQ(id)).Exec(ctx) + return err } func (r *groupRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Group, *pagination.PaginationResult, error) { return r.ListWithFilters(ctx, params, "", "", nil) } -// ListWithFilters lists groups with optional filtering by platform, status, and is_exclusive func (r *groupRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status string, isExclusive *bool) ([]service.Group, *pagination.PaginationResult, error) { - var groups []groupModel - var total int64 + q := r.client.Group.Query() - db := r.db.WithContext(ctx).Model(&groupModel{}) - - // Apply filters if platform != "" { - db = db.Where("platform = ?", platform) + q = q.Where(group.PlatformEQ(platform)) } if status != "" { - db = db.Where("status = ?", status) + q = q.Where(group.StatusEQ(status)) } if isExclusive != nil { - db = db.Where("is_exclusive = ?", *isExclusive) + q = q.Where(group.IsExclusiveEQ(*isExclusive)) } - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id ASC").Find(&groups).Error; err != nil { + groups, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Asc(group.FieldID)). 
+ All(ctx) + if err != nil { return nil, nil, err } + groupIDs := make([]int64, 0, len(groups)) outGroups := make([]service.Group, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *groupModelToService(&groups[i])) + g := groupEntityToService(groups[i]) + outGroups = append(outGroups, *g) + groupIDs = append(groupIDs, g.ID) } - // 获取每个分组的账号数量 - for i := range outGroups { - count, _ := r.GetAccountCount(ctx, outGroups[i].ID) - outGroups[i].AccountCount = count + counts, err := r.loadAccountCounts(ctx, groupIDs) + if err == nil { + for i := range outGroups { + outGroups[i].AccountCount = counts[outGroups[i].ID] + } } - return outGroups, paginationResultFromTotal(total, params), nil + return outGroups, paginationResultFromTotal(int64(total), params), nil } func (r *groupRepository) ListActive(ctx context.Context) ([]service.Group, error) { - var groups []groupModel - err := r.db.WithContext(ctx).Where("status = ?", service.StatusActive).Order("id ASC").Find(&groups).Error + groups, err := r.client.Group.Query(). + Where(group.StatusEQ(service.StatusActive)). + Order(dbent.Asc(group.FieldID)). + All(ctx) if err != nil { return nil, err } + + groupIDs := make([]int64, 0, len(groups)) outGroups := make([]service.Group, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *groupModelToService(&groups[i])) + g := groupEntityToService(groups[i]) + outGroups = append(outGroups, *g) + groupIDs = append(groupIDs, g.ID) } - // 获取每个分组的账号数量 - for i := range outGroups { - count, _ := r.GetAccountCount(ctx, outGroups[i].ID) - outGroups[i].AccountCount = count + + counts, err := r.loadAccountCounts(ctx, groupIDs) + if err == nil { + for i := range outGroups { + outGroups[i].AccountCount = counts[outGroups[i].ID] + } } + return outGroups, nil } func (r *groupRepository) ListActiveByPlatform(ctx context.Context, platform string) ([]service.Group, error) { - var groups []groupModel - err := r.db.WithContext(ctx).Where("status = ? 
AND platform = ?", service.StatusActive, platform).Order("id ASC").Find(&groups).Error + groups, err := r.client.Group.Query(). + Where(group.StatusEQ(service.StatusActive), group.PlatformEQ(platform)). + Order(dbent.Asc(group.FieldID)). + All(ctx) if err != nil { return nil, err } + + groupIDs := make([]int64, 0, len(groups)) outGroups := make([]service.Group, 0, len(groups)) for i := range groups { - outGroups = append(outGroups, *groupModelToService(&groups[i])) + g := groupEntityToService(groups[i]) + outGroups = append(outGroups, *g) + groupIDs = append(groupIDs, g.ID) } - // 获取每个分组的账号数量 - for i := range outGroups { - count, _ := r.GetAccountCount(ctx, outGroups[i].ID) - outGroups[i].AccountCount = count + + counts, err := r.loadAccountCounts(ctx, groupIDs) + if err == nil { + for i := range outGroups { + outGroups[i].AccountCount = counts[outGroups[i].ID] + } } + return outGroups, nil } func (r *groupRepository) ExistsByName(ctx context.Context, name string) (bool, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&groupModel{}).Where("name = ?", name).Count(&count).Error - return count > 0, err + return r.client.Group.Query().Where(group.NameEQ(name)).Exist(ctx) } func (r *groupRepository) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { var count int64 - err := r.db.WithContext(ctx).Table("account_groups").Where("group_id = ?", groupID).Count(&count).Error - return count, err + if err := r.sql.QueryRowContext(ctx, "SELECT COUNT(*) FROM account_groups WHERE group_id = $1", groupID).Scan(&count); err != nil { + return 0, err + } + return count, nil } -// DeleteAccountGroupsByGroupID 删除分组与账号的关联关系 func (r *groupRepository) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) { - result := r.db.WithContext(ctx).Exec("DELETE FROM account_groups WHERE group_id = ?", groupID) - return result.RowsAffected, result.Error + res, err := r.sql.ExecContext(ctx, "DELETE FROM account_groups WHERE group_id = $1", 
groupID) + if err != nil { + return 0, err + } + affected, _ := res.RowsAffected() + return affected, nil } func (r *groupRepository) DeleteCascade(ctx context.Context, id int64) ([]int64, error) { - group, err := r.GetByID(ctx, id) + g, err := r.client.Group.Query().Where(group.IDEQ(id)).Only(ctx) if err != nil { + return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil) + } + groupSvc := groupEntityToService(g) + + exec := r.sql + txClient := r.client + var sqlTx *sql.Tx + var txClientClose func() error + + if r.begin != nil { + sqlTx, err = r.begin.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + exec = sqlTx + txClient = entClientFromSQLTx(sqlTx) + txClientClose = txClient.Close + defer func() { _ = sqlTx.Rollback() }() + } + if txClientClose != nil { + defer func() { _ = txClientClose() }() + } + + // Lock the group row to avoid concurrent writes while we cascade. + var lockedID int64 + if err := exec.QueryRowContext(ctx, "SELECT id FROM groups WHERE id = $1 FOR UPDATE", id).Scan(&lockedID); err != nil { + if errorsIsNoRows(err) { + return nil, service.ErrGroupNotFound + } return nil, err } var affectedUserIDs []int64 - if group.IsSubscriptionType() { - if err := r.db.WithContext(ctx). - Table("user_subscriptions"). - Where("group_id = ?", id). 
- Pluck("user_id", &affectedUserIDs).Error; err != nil { + if groupSvc.IsSubscriptionType() { + rows, err := exec.QueryContext(ctx, "SELECT user_id FROM user_subscriptions WHERE group_id = $1", id) + if err != nil { + return nil, err + } + for rows.Next() { + var userID int64 + if scanErr := rows.Scan(&userID); scanErr != nil { + _ = rows.Close() + return nil, scanErr + } + affectedUserIDs = append(affectedUserIDs, userID) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + + if _, err := exec.ExecContext(ctx, "DELETE FROM user_subscriptions WHERE group_id = $1", id); err != nil { return nil, err } } - err = r.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - // 1. 删除订阅类型分组的订阅记录 - if group.IsSubscriptionType() { - if err := tx.Exec("DELETE FROM user_subscriptions WHERE group_id = ?", id).Error; err != nil { - return err - } - } - - // 2. 将 api_keys 中绑定该分组的 group_id 设为 nil - if err := tx.Exec("UPDATE api_keys SET group_id = NULL WHERE group_id = ?", id).Error; err != nil { - return err - } - - // 3. 从 users.allowed_groups 数组中移除该分组 ID - if err := tx.Exec( - "UPDATE users SET allowed_groups = array_remove(allowed_groups, ?) WHERE ? = ANY(allowed_groups)", - id, id, - ).Error; err != nil { - return err - } - - // 4. 删除 account_groups 中间表的数据 - if err := tx.Exec("DELETE FROM account_groups WHERE group_id = ?", id).Error; err != nil { - return err - } - - // 5. 删除分组本身(带锁,避免并发写) - if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}).Delete(&groupModel{}, id).Error; err != nil { - return err - } - - return nil - }) - if err != nil { + // 2. Clear group_id for api keys bound to this group. + if _, err := txClient.ApiKey.Update(). + Where(apikey.GroupIDEQ(id)). + ClearGroupID(). + Save(ctx); err != nil { return nil, err } + // 3. Remove the group id from users.allowed_groups array (legacy representation). 
+ // Phase 1 compatibility: also delete from user_allowed_groups join table when present. + if _, err := exec.ExecContext(ctx, "DELETE FROM user_allowed_groups WHERE group_id = $1", id); err != nil { + return nil, err + } + if _, err := exec.ExecContext( + ctx, + "UPDATE users SET allowed_groups = array_remove(allowed_groups, $1) WHERE $1 = ANY(allowed_groups)", + id, + ); err != nil { + return nil, err + } + + // 4. Delete account_groups join rows. + if _, err := exec.ExecContext(ctx, "DELETE FROM account_groups WHERE group_id = $1", id); err != nil { + return nil, err + } + + // 5. Soft-delete group itself. + if _, err := txClient.Group.Delete().Where(group.IDEQ(id)).Exec(ctx); err != nil { + return nil, err + } + + if sqlTx != nil { + if err := sqlTx.Commit(); err != nil { + return nil, err + } + } + return affectedUserIDs, nil } -type groupModel struct { - ID int64 `gorm:"primaryKey"` - Name string `gorm:"uniqueIndex;size:100;not null"` - Description string `gorm:"type:text"` - Platform string `gorm:"size:50;default:anthropic;not null"` - RateMultiplier float64 `gorm:"type:decimal(10,4);default:1.0;not null"` - IsExclusive bool `gorm:"default:false;not null"` - Status string `gorm:"size:20;default:active;not null"` +func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int64) (map[int64]int64, error) { + counts := make(map[int64]int64, len(groupIDs)) + if len(groupIDs) == 0 { + return counts, nil + } - SubscriptionType string `gorm:"size:20;default:standard;not null"` - DailyLimitUSD *float64 `gorm:"type:decimal(20,8)"` - WeeklyLimitUSD *float64 `gorm:"type:decimal(20,8)"` - MonthlyLimitUSD *float64 `gorm:"type:decimal(20,8)"` + rows, err := r.sql.QueryContext( + ctx, + "SELECT group_id, COUNT(*) FROM account_groups WHERE group_id = ANY($1) GROUP BY group_id", + pq.Array(groupIDs), + ) + if err != nil { + return nil, err + } + defer rows.Close() - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - DeletedAt 
gorm.DeletedAt `gorm:"index"` + for rows.Next() { + var groupID int64 + var count int64 + if err := rows.Scan(&groupID, &count); err != nil { + return nil, err + } + counts[groupID] = count + } + if err := rows.Err(); err != nil { + return nil, err + } + + return counts, nil } -func (groupModel) TableName() string { return "groups" } - -func groupModelToService(m *groupModel) *service.Group { - if m == nil { - return nil - } - return &service.Group{ - ID: m.ID, - Name: m.Name, - Description: m.Description, - Platform: m.Platform, - RateMultiplier: m.RateMultiplier, - IsExclusive: m.IsExclusive, - Status: m.Status, - SubscriptionType: m.SubscriptionType, - DailyLimitUSD: m.DailyLimitUSD, - WeeklyLimitUSD: m.WeeklyLimitUSD, - MonthlyLimitUSD: m.MonthlyLimitUSD, - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - } +func entClientFromSQLTx(tx *sql.Tx) *dbent.Client { + drv := entsql.NewDriver(dialect.Postgres, entsql.Conn{ExecQuerier: tx}) + return dbent.NewClient(dbent.Driver(drv)) } -func groupModelFromService(sg *service.Group) *groupModel { - if sg == nil { - return nil - } - return &groupModel{ - ID: sg.ID, - Name: sg.Name, - Description: sg.Description, - Platform: sg.Platform, - RateMultiplier: sg.RateMultiplier, - IsExclusive: sg.IsExclusive, - Status: sg.Status, - SubscriptionType: sg.SubscriptionType, - DailyLimitUSD: sg.DailyLimitUSD, - WeeklyLimitUSD: sg.WeeklyLimitUSD, - MonthlyLimitUSD: sg.MonthlyLimitUSD, - CreatedAt: sg.CreatedAt, - UpdatedAt: sg.UpdatedAt, - } -} - -func applyGroupModelToService(group *service.Group, m *groupModel) { - if group == nil || m == nil { - return - } - group.ID = m.ID - group.CreatedAt = m.CreatedAt - group.UpdatedAt = m.UpdatedAt +func errorsIsNoRows(err error) bool { + return err == sql.ErrNoRows } diff --git a/backend/internal/repository/group_repo_integration_test.go b/backend/internal/repository/group_repo_integration_test.go index 33ff6326..4bc4ee6a 100644 --- 
a/backend/internal/repository/group_repo_integration_test.go +++ b/backend/internal/repository/group_repo_integration_test.go @@ -4,25 +4,26 @@ package repository import ( "context" + "database/sql" "testing" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type GroupRepoSuite struct { suite.Suite ctx context.Context - db *gorm.DB + tx *sql.Tx repo *groupRepository } func (s *GroupRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewGroupRepository(s.db).(*groupRepository) + entClient, tx := testEntSQLTx(s.T()) + s.tx = tx + s.repo = newGroupRepositoryWithSQL(entClient, tx) } func TestGroupRepoSuite(t *testing.T) { @@ -33,9 +34,12 @@ func TestGroupRepoSuite(t *testing.T) { func (s *GroupRepoSuite) TestCreate() { group := &service.Group{ - Name: "test-create", - Platform: service.PlatformAnthropic, - Status: service.StatusActive, + Name: "test-create", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, } err := s.repo.Create(s.ctx, group) @@ -50,10 +54,19 @@ func (s *GroupRepoSuite) TestCreate() { func (s *GroupRepoSuite) TestGetByID_NotFound() { _, err := s.repo.GetByID(s.ctx, 999999) s.Require().Error(err, "expected error for non-existent ID") + s.Require().ErrorIs(err, service.ErrGroupNotFound) } func (s *GroupRepoSuite) TestUpdate() { - group := groupModelToService(mustCreateGroup(s.T(), s.db, &groupModel{Name: "original"})) + group := &service.Group{ + Name: "original", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) group.Name = "updated" err := s.repo.Update(s.ctx, group) @@ -65,20 +78,43 @@ func (s *GroupRepoSuite) 
TestUpdate() { } func (s *GroupRepoSuite) TestDelete() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "to-delete"}) + group := &service.Group{ + Name: "to-delete", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) err := s.repo.Delete(s.ctx, group.ID) s.Require().NoError(err, "Delete") _, err = s.repo.GetByID(s.ctx, group.ID) s.Require().Error(err, "expected error after delete") + s.Require().ErrorIs(err, service.ErrGroupNotFound) } // --- List / ListWithFilters --- func (s *GroupRepoSuite) TestList() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1"}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2"}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) groups, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "List") @@ -87,8 +123,22 @@ func (s *GroupRepoSuite) TestList() { } func (s *GroupRepoSuite) TestListWithFilters_Platform() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1", Platform: service.PlatformAnthropic}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2", Platform: service.PlatformOpenAI}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + 
s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformOpenAI, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.PlatformOpenAI, "", nil) s.Require().NoError(err) @@ -97,8 +147,22 @@ func (s *GroupRepoSuite) TestListWithFilters_Platform() { } func (s *GroupRepoSuite) TestListWithFilters_Status() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1", Status: service.StatusActive}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2", Status: service.StatusDisabled}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusDisabled, nil) s.Require().NoError(err) @@ -107,8 +171,22 @@ func (s *GroupRepoSuite) TestListWithFilters_Status() { } func (s *GroupRepoSuite) TestListWithFilters_IsExclusive() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1", IsExclusive: false}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2", IsExclusive: true}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: 
service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: true, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) isExclusive := true groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", &isExclusive) @@ -118,21 +196,35 @@ func (s *GroupRepoSuite) TestListWithFilters_IsExclusive() { } func (s *GroupRepoSuite) TestListWithFilters_AccountCount() { - g1 := mustCreateGroup(s.T(), s.db, &groupModel{ - Name: "g1", - Platform: service.PlatformAnthropic, - Status: service.StatusActive, - }) - g2 := mustCreateGroup(s.T(), s.db, &groupModel{ - Name: "g2", - Platform: service.PlatformAnthropic, - Status: service.StatusActive, - IsExclusive: true, - }) + g1 := &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + g2 := &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: true, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g1)) + s.Require().NoError(s.repo.Create(s.ctx, g2)) - a := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc1"}) - mustBindAccountToGroup(s.T(), s.db, a.ID, g1.ID, 1) - mustBindAccountToGroup(s.T(), s.db, a.ID, g2.ID, 1) + var accountID int64 + s.Require().NoError(s.tx.QueryRowContext( + s.ctx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + "acc1", service.PlatformAnthropic, service.AccountTypeOAuth, + ).Scan(&accountID)) + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", accountID, g1.ID, 1) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, 
$3, NOW())", accountID, g2.ID, 1) + s.Require().NoError(err) isExclusive := true groups, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.PlatformAnthropic, service.StatusActive, &isExclusive) @@ -146,8 +238,22 @@ func (s *GroupRepoSuite) TestListWithFilters_AccountCount() { // --- ListActive / ListActiveByPlatform --- func (s *GroupRepoSuite) TestListActive() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "active1", Status: service.StatusActive}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "inactive1", Status: service.StatusDisabled}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "active1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "inactive1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) groups, err := s.repo.ListActive(s.ctx) s.Require().NoError(err, "ListActive") @@ -156,9 +262,30 @@ func (s *GroupRepoSuite) TestListActive() { } func (s *GroupRepoSuite) TestListActiveByPlatform() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g1", Platform: service.PlatformAnthropic, Status: service.StatusActive}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g2", Platform: service.PlatformOpenAI, Status: service.StatusActive}) - mustCreateGroup(s.T(), s.db, &groupModel{Name: "g3", Platform: service.PlatformAnthropic, Status: service.StatusDisabled}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + 
Name: "g2", + Platform: service.PlatformOpenAI, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g3", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) groups, err := s.repo.ListActiveByPlatform(s.ctx, service.PlatformAnthropic) s.Require().NoError(err, "ListActiveByPlatform") @@ -169,7 +296,14 @@ func (s *GroupRepoSuite) TestListActiveByPlatform() { // --- ExistsByName --- func (s *GroupRepoSuite) TestExistsByName() { - mustCreateGroup(s.T(), s.db, &groupModel{Name: "existing-group"}) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "existing-group", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) exists, err := s.repo.ExistsByName(s.ctx, "existing-group") s.Require().NoError(err, "ExistsByName") @@ -183,11 +317,33 @@ func (s *GroupRepoSuite) TestExistsByName() { // --- GetAccountCount --- func (s *GroupRepoSuite) TestGetAccountCount() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-count"}) - a1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1"}) - a2 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2"}) - mustBindAccountToGroup(s.T(), s.db, a1.ID, group.ID, 1) - mustBindAccountToGroup(s.T(), s.db, a2.ID, group.ID, 2) + group := &service.Group{ + Name: "g-count", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + var a1 int64 + s.Require().NoError(s.tx.QueryRowContext( + s.ctx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, 
$2, $3) RETURNING id", + "a1", service.PlatformAnthropic, service.AccountTypeOAuth, + ).Scan(&a1)) + var a2 int64 + s.Require().NoError(s.tx.QueryRowContext( + s.ctx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + "a2", service.PlatformAnthropic, service.AccountTypeOAuth, + ).Scan(&a2)) + + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a1, group.ID, 1) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a2, group.ID, 2) + s.Require().NoError(err) count, err := s.repo.GetAccountCount(s.ctx, group.ID) s.Require().NoError(err, "GetAccountCount") @@ -195,7 +351,15 @@ func (s *GroupRepoSuite) TestGetAccountCount() { } func (s *GroupRepoSuite) TestGetAccountCount_Empty() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-empty"}) + group := &service.Group{ + Name: "g-empty", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) count, err := s.repo.GetAccountCount(s.ctx, group.ID) s.Require().NoError(err) @@ -205,9 +369,23 @@ func (s *GroupRepoSuite) TestGetAccountCount_Empty() { // --- DeleteAccountGroupsByGroupID --- func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID() { - g := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-del"}) - a := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-del"}) - mustBindAccountToGroup(s.T(), s.db, a.ID, g.ID, 1) + g := &service.Group{ + Name: "g-del", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g)) + var accountID int64 + 
s.Require().NoError(s.tx.QueryRowContext( + s.ctx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + "acc-del", service.PlatformAnthropic, service.AccountTypeOAuth, + ).Scan(&accountID)) + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", accountID, g.ID, 1) + s.Require().NoError(err) affected, err := s.repo.DeleteAccountGroupsByGroupID(s.ctx, g.ID) s.Require().NoError(err, "DeleteAccountGroupsByGroupID") @@ -219,13 +397,34 @@ func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID() { } func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID_MultipleAccounts() { - g := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-multi"}) - a1 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1"}) - a2 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2"}) - a3 := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a3"}) - mustBindAccountToGroup(s.T(), s.db, a1.ID, g.ID, 1) - mustBindAccountToGroup(s.T(), s.db, a2.ID, g.ID, 2) - mustBindAccountToGroup(s.T(), s.db, a3.ID, g.ID, 3) + g := &service.Group{ + Name: "g-multi", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g)) + + insertAccount := func(name string) int64 { + var id int64 + s.Require().NoError(s.tx.QueryRowContext( + s.ctx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + name, service.PlatformAnthropic, service.AccountTypeOAuth, + ).Scan(&id)) + return id + } + a1 := insertAccount("a1") + a2 := insertAccount("a2") + a3 := insertAccount("a3") + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a1, g.ID, 1) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups 
(account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a2, g.ID, 2) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a3, g.ID, 3) + s.Require().NoError(err) affected, err := s.repo.DeleteAccountGroupsByGroupID(s.ctx, g.ID) s.Require().NoError(err) diff --git a/backend/internal/repository/integration_harness_test.go b/backend/internal/repository/integration_harness_test.go index ab248d06..315cb86a 100644 --- a/backend/internal/repository/integration_harness_test.go +++ b/backend/internal/repository/integration_harness_test.go @@ -15,16 +15,19 @@ import ( "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + _ "github.com/Wei-Shaw/sub2api/ent/runtime" + "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "github.com/lib/pq" redisclient "github.com/redis/go-redis/v9" tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" tcredis "github.com/testcontainers/testcontainers-go/modules/redis" - gormpostgres "gorm.io/driver/postgres" - "gorm.io/gorm" - "gorm.io/gorm/logger" ) const ( @@ -33,7 +36,7 @@ const ( ) var ( - integrationDB *gorm.DB + integrationDB *sql.DB integrationRedis *redisclient.Client redisNamespaceSeq uint64 @@ -88,13 +91,13 @@ func TestMain(m *testing.M) { os.Exit(1) } - integrationDB, err = openGormWithRetry(ctx, dsn, 30*time.Second) + integrationDB, err = openSQLWithRetry(ctx, dsn, 30*time.Second) if err != nil { - log.Printf("failed to open gorm db: %v", err) + log.Printf("failed to open sql db: %v", err) os.Exit(1) } - if err := AutoMigrate(integrationDB); err != nil { - log.Printf("failed to automigrate db: %v", err) + if err := infrastructure.ApplyMigrations(ctx, integrationDB); err != nil { + 
log.Printf("failed to apply db migrations: %v", err) os.Exit(1) } @@ -121,6 +124,7 @@ func TestMain(m *testing.M) { code := m.Run() _ = integrationRedis.Close() + _ = integrationDB.Close() os.Exit(code) } @@ -147,29 +151,21 @@ func dockerImageExists(ctx context.Context, image string) bool { return cmd.Run() == nil } -func openGormWithRetry(ctx context.Context, dsn string, timeout time.Duration) (*gorm.DB, error) { +func openSQLWithRetry(ctx context.Context, dsn string, timeout time.Duration) (*sql.DB, error) { deadline := time.Now().Add(timeout) var lastErr error for time.Now().Before(deadline) { - db, err := gorm.Open(gormpostgres.Open(dsn), &gorm.Config{ - Logger: logger.Default.LogMode(logger.Silent), - }) + db, err := sql.Open("postgres", dsn) if err != nil { lastErr = err time.Sleep(250 * time.Millisecond) continue } - sqlDB, err := db.DB() - if err != nil { - lastErr = err - time.Sleep(250 * time.Millisecond) - continue - } - - if err := pingWithTimeout(ctx, sqlDB, 2*time.Second); err != nil { + if err := pingWithTimeout(ctx, db, 2*time.Second); err != nil { lastErr = err + _ = db.Close() time.Sleep(250 * time.Millisecond) continue } @@ -186,17 +182,31 @@ func pingWithTimeout(ctx context.Context, db *sql.DB, timeout time.Duration) err return db.PingContext(pingCtx) } -func testTx(t *testing.T) *gorm.DB { +func testTx(t *testing.T) *sql.Tx { t.Helper() - tx := integrationDB.Begin() - require.NoError(t, tx.Error, "begin tx") + tx, err := integrationDB.BeginTx(context.Background(), nil) + require.NoError(t, err, "begin tx") t.Cleanup(func() { - _ = tx.Rollback().Error + _ = tx.Rollback() }) return tx } +func testEntSQLTx(t *testing.T) (*dbent.Client, *sql.Tx) { + t.Helper() + + tx := testTx(t) + drv := entsql.NewDriver(dialect.Postgres, entsql.Conn{ExecQuerier: tx}) + client := dbent.NewClient(dbent.Driver(drv)) + + t.Cleanup(func() { + _ = client.Close() + }) + + return client, tx +} + func testRedis(t *testing.T) *redisclient.Client { t.Helper() @@ -347,18 
+357,19 @@ func (s *IntegrationRedisSuite) AssertTTLWithin(ttl, min, max time.Duration) { assertTTLWithin(s.T(), ttl, min, max) } -// IntegrationDBSuite provides a base suite for DB (Gorm) integration tests. -// Embedding suites should call SetupTest to initialize ctx and db. +// IntegrationDBSuite provides a base suite for DB integration tests. +// Embedding suites should call SetupTest to initialize ctx and client. type IntegrationDBSuite struct { suite.Suite - ctx context.Context - db *gorm.DB + ctx context.Context + client *dbent.Client + tx *sql.Tx } -// SetupTest initializes ctx and db for each test method. +// SetupTest initializes ctx and client for each test method. func (s *IntegrationDBSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) + s.client, s.tx = testEntSQLTx(s.T()) } // RequireNoError is a convenience method wrapping require.NoError with s.T(). diff --git a/backend/internal/repository/migrations_schema_integration_test.go b/backend/internal/repository/migrations_schema_integration_test.go new file mode 100644 index 00000000..80b0fad7 --- /dev/null +++ b/backend/internal/repository/migrations_schema_integration_test.go @@ -0,0 +1,90 @@ +//go:build integration + +package repository + +import ( + "context" + "database/sql" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/infrastructure" + "github.com/stretchr/testify/require" +) + +func TestMigrationsRunner_IsIdempotent_AndSchemaIsUpToDate(t *testing.T) { + tx := testTx(t) + + // Re-apply migrations to verify idempotency (no errors, no duplicate rows). + require.NoError(t, infrastructure.ApplyMigrations(context.Background(), integrationDB)) + + // schema_migrations should have at least the current migration set. 
+ var applied int + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT COUNT(*) FROM schema_migrations").Scan(&applied)) + require.GreaterOrEqual(t, applied, 7, "expected schema_migrations to contain applied migrations") + + // users: columns required by repository queries + requireColumn(t, tx, "users", "username", "character varying", 100, false) + requireColumn(t, tx, "users", "wechat", "character varying", 100, false) + requireColumn(t, tx, "users", "notes", "text", 0, false) + + // accounts: schedulable and rate-limit fields + requireColumn(t, tx, "accounts", "schedulable", "boolean", 0, false) + requireColumn(t, tx, "accounts", "rate_limited_at", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "rate_limit_reset_at", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "overload_until", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "session_window_status", "character varying", 20, true) + + // api_keys: key length should be 128 + requireColumn(t, tx, "api_keys", "key", "character varying", 128, false) + + // redeem_codes: subscription fields + requireColumn(t, tx, "redeem_codes", "group_id", "bigint", 0, true) + requireColumn(t, tx, "redeem_codes", "validity_days", "integer", 0, false) + + // usage_logs: billing_type used by filters/stats + requireColumn(t, tx, "usage_logs", "billing_type", "smallint", 0, false) + + // settings table should exist + var settingsRegclass sql.NullString + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT to_regclass('public.settings')").Scan(&settingsRegclass)) + require.True(t, settingsRegclass.Valid, "expected settings table to exist") + + // user_allowed_groups table should exist + var uagRegclass sql.NullString + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT to_regclass('public.user_allowed_groups')").Scan(&uagRegclass)) + require.True(t, uagRegclass.Valid, "expected user_allowed_groups table to 
exist") +} + +func requireColumn(t *testing.T, tx *sql.Tx, table, column, dataType string, maxLen int, nullable bool) { + t.Helper() + + var row struct { + DataType string + MaxLen sql.NullInt64 + Nullable string + } + + err := tx.QueryRowContext(context.Background(), ` +SELECT + data_type, + character_maximum_length, + is_nullable +FROM information_schema.columns +WHERE table_schema = 'public' + AND table_name = $1 + AND column_name = $2 +`, table, column).Scan(&row.DataType, &row.MaxLen, &row.Nullable) + require.NoError(t, err, "query information_schema.columns for %s.%s", table, column) + require.Equal(t, dataType, row.DataType, "data_type mismatch for %s.%s", table, column) + + if maxLen > 0 { + require.True(t, row.MaxLen.Valid, "expected maxLen for %s.%s", table, column) + require.Equal(t, int64(maxLen), row.MaxLen.Int64, "maxLen mismatch for %s.%s", table, column) + } + + if nullable { + require.Equal(t, "YES", row.Nullable, "nullable mismatch for %s.%s", table, column) + } else { + require.Equal(t, "NO", row.Nullable, "nullable mismatch for %s.%s", table, column) + } +} diff --git a/backend/internal/repository/proxy_repo.go b/backend/internal/repository/proxy_repo.go index 423584fb..26290a79 100644 --- a/backend/internal/repository/proxy_repo.go +++ b/backend/internal/repository/proxy_repo.go @@ -2,52 +2,97 @@ package repository import ( "context" - "time" + "database/sql" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" - - "gorm.io/gorm" ) +type sqlQuerier interface { + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) + QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row +} + type proxyRepository struct { - db *gorm.DB + client *dbent.Client + sql sqlQuerier } -func NewProxyRepository(db *gorm.DB) service.ProxyRepository { - return &proxyRepository{db: db} +func 
NewProxyRepository(client *dbent.Client, sqlDB *sql.DB) service.ProxyRepository { + return newProxyRepositoryWithSQL(client, sqlDB) } -func (r *proxyRepository) Create(ctx context.Context, proxy *service.Proxy) error { - m := proxyModelFromService(proxy) - err := r.db.WithContext(ctx).Create(m).Error +func newProxyRepositoryWithSQL(client *dbent.Client, sqlq sqlQuerier) *proxyRepository { + return &proxyRepository{client: client, sql: sqlq} +} + +func (r *proxyRepository) Create(ctx context.Context, proxyIn *service.Proxy) error { + builder := r.client.Proxy.Create(). + SetName(proxyIn.Name). + SetProtocol(proxyIn.Protocol). + SetHost(proxyIn.Host). + SetPort(proxyIn.Port). + SetStatus(proxyIn.Status) + if proxyIn.Username != "" { + builder.SetUsername(proxyIn.Username) + } + if proxyIn.Password != "" { + builder.SetPassword(proxyIn.Password) + } + + created, err := builder.Save(ctx) if err == nil { - applyProxyModelToService(proxy, m) + applyProxyEntityToService(proxyIn, created) } return err } func (r *proxyRepository) GetByID(ctx context.Context, id int64) (*service.Proxy, error) { - var m proxyModel - err := r.db.WithContext(ctx).First(&m, id).Error + m, err := r.client.Proxy.Get(ctx, id) if err != nil { - return nil, translatePersistenceError(err, service.ErrProxyNotFound, nil) + if dbent.IsNotFound(err) { + return nil, service.ErrProxyNotFound + } + return nil, err } - return proxyModelToService(&m), nil + return proxyEntityToService(m), nil } -func (r *proxyRepository) Update(ctx context.Context, proxy *service.Proxy) error { - m := proxyModelFromService(proxy) - err := r.db.WithContext(ctx).Save(m).Error +func (r *proxyRepository) Update(ctx context.Context, proxyIn *service.Proxy) error { + builder := r.client.Proxy.UpdateOneID(proxyIn.ID). + SetName(proxyIn.Name). + SetProtocol(proxyIn.Protocol). + SetHost(proxyIn.Host). + SetPort(proxyIn.Port). 
+ SetStatus(proxyIn.Status) + if proxyIn.Username != "" { + builder.SetUsername(proxyIn.Username) + } else { + builder.ClearUsername() + } + if proxyIn.Password != "" { + builder.SetPassword(proxyIn.Password) + } else { + builder.ClearPassword() + } + + updated, err := builder.Save(ctx) if err == nil { - applyProxyModelToService(proxy, m) + applyProxyEntityToService(proxyIn, updated) + return nil + } + if dbent.IsNotFound(err) { + return service.ErrProxyNotFound } return err } func (r *proxyRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&proxyModel{}, id).Error + _, err := r.client.Proxy.Delete().Where(proxy.IDEQ(id)).Exec(ctx) + return err } func (r *proxyRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Proxy, *pagination.PaginationResult, error) { @@ -56,104 +101,111 @@ func (r *proxyRepository) List(ctx context.Context, params pagination.Pagination // ListWithFilters lists proxies with optional filtering by protocol, status, and search query func (r *proxyRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.Proxy, *pagination.PaginationResult, error) { - var proxies []proxyModel - var total int64 - - db := r.db.WithContext(ctx).Model(&proxyModel{}) - - // Apply filters + q := r.client.Proxy.Query() if protocol != "" { - db = db.Where("protocol = ?", protocol) + q = q.Where(proxy.ProtocolEQ(protocol)) } if status != "" { - db = db.Where("status = ?", status) + q = q.Where(proxy.StatusEQ(status)) } if search != "" { - searchPattern := "%" + search + "%" - db = db.Where("name ILIKE ?", searchPattern) + q = q.Where(proxy.NameContainsFold(search)) } - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&proxies).Error; err != nil { + proxies, err := q. 
+ Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(proxy.FieldID)). + All(ctx) + if err != nil { return nil, nil, err } outProxies := make([]service.Proxy, 0, len(proxies)) for i := range proxies { - outProxies = append(outProxies, *proxyModelToService(&proxies[i])) + outProxies = append(outProxies, *proxyEntityToService(proxies[i])) } - return outProxies, paginationResultFromTotal(total, params), nil + return outProxies, paginationResultFromTotal(int64(total), params), nil } func (r *proxyRepository) ListActive(ctx context.Context) ([]service.Proxy, error) { - var proxies []proxyModel - err := r.db.WithContext(ctx).Where("status = ?", service.StatusActive).Find(&proxies).Error + proxies, err := r.client.Proxy.Query(). + Where(proxy.StatusEQ(service.StatusActive)). + All(ctx) if err != nil { return nil, err } outProxies := make([]service.Proxy, 0, len(proxies)) for i := range proxies { - outProxies = append(outProxies, *proxyModelToService(&proxies[i])) + outProxies = append(outProxies, *proxyEntityToService(proxies[i])) } return outProxies, nil } // ExistsByHostPortAuth checks if a proxy with the same host, port, username, and password exists func (r *proxyRepository) ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&proxyModel{}). - Where("host = ? AND port = ? AND username = ? AND password = ?", host, port, username, password). - Count(&count).Error - if err != nil { - return false, err + q := r.client.Proxy.Query(). 
+ Where(proxy.HostEQ(host), proxy.PortEQ(port)) + + if username == "" { + q = q.Where(proxy.Or(proxy.UsernameIsNil(), proxy.UsernameEQ(""))) + } else { + q = q.Where(proxy.UsernameEQ(username)) } - return count > 0, nil + if password == "" { + q = q.Where(proxy.Or(proxy.PasswordIsNil(), proxy.PasswordEQ(""))) + } else { + q = q.Where(proxy.PasswordEQ(password)) + } + + count, err := q.Count(ctx) + return count > 0, err } // CountAccountsByProxyID returns the number of accounts using a specific proxy func (r *proxyRepository) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { + row := r.sql.QueryRowContext(ctx, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1", proxyID) var count int64 - err := r.db.WithContext(ctx).Table("accounts"). - Where("proxy_id = ?", proxyID). - Count(&count).Error - return count, err + if err := row.Scan(&count); err != nil { + return 0, err + } + return count, nil } // GetAccountCountsForProxies returns a map of proxy ID to account count for all proxies func (r *proxyRepository) GetAccountCountsForProxies(ctx context.Context) (map[int64]int64, error) { - type result struct { - ProxyID int64 `gorm:"column:proxy_id"` - Count int64 `gorm:"column:count"` - } - var results []result - err := r.db.WithContext(ctx). - Table("accounts"). - Select("proxy_id, COUNT(*) as count"). - Where("proxy_id IS NOT NULL"). - Group("proxy_id"). 
- Scan(&results).Error + rows, err := r.sql.QueryContext(ctx, "SELECT proxy_id, COUNT(*) AS count FROM accounts WHERE proxy_id IS NOT NULL GROUP BY proxy_id") if err != nil { return nil, err } + defer rows.Close() counts := make(map[int64]int64) - for _, r := range results { - counts[r.ProxyID] = r.Count + for rows.Next() { + var proxyID, count int64 + if err := rows.Scan(&proxyID, &count); err != nil { + return nil, err + } + counts[proxyID] = count + } + if err := rows.Err(); err != nil { + return nil, err } return counts, nil } // ListActiveWithAccountCount returns all active proxies with account count, sorted by creation time descending func (r *proxyRepository) ListActiveWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { - var proxies []proxyModel - err := r.db.WithContext(ctx). - Where("status = ?", service.StatusActive). - Order("created_at DESC"). - Find(&proxies).Error + proxies, err := r.client.Proxy.Query(). + Where(proxy.StatusEQ(service.StatusActive)). + Order(dbent.Desc(proxy.FieldCreatedAt)). 
+ All(ctx) if err != nil { return nil, err } @@ -167,76 +219,47 @@ func (r *proxyRepository) ListActiveWithAccountCount(ctx context.Context) ([]ser // Build result with account counts result := make([]service.ProxyWithAccountCount, 0, len(proxies)) for i := range proxies { - proxy := proxyModelToService(&proxies[i]) - if proxy == nil { + proxyOut := proxyEntityToService(proxies[i]) + if proxyOut == nil { continue } result = append(result, service.ProxyWithAccountCount{ - Proxy: *proxy, - AccountCount: counts[proxy.ID], + Proxy: *proxyOut, + AccountCount: counts[proxyOut.ID], }) } return result, nil } -type proxyModel struct { - ID int64 `gorm:"primaryKey"` - Name string `gorm:"size:100;not null"` - Protocol string `gorm:"size:20;not null"` - Host string `gorm:"size:255;not null"` - Port int `gorm:"not null"` - Username string `gorm:"size:100"` - Password string `gorm:"size:100"` - Status string `gorm:"size:20;default:active;not null"` - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - DeletedAt gorm.DeletedAt `gorm:"index"` -} - -func (proxyModel) TableName() string { return "proxies" } - -func proxyModelToService(m *proxyModel) *service.Proxy { +func proxyEntityToService(m *dbent.Proxy) *service.Proxy { if m == nil { return nil } - return &service.Proxy{ + out := &service.Proxy{ ID: m.ID, Name: m.Name, Protocol: m.Protocol, Host: m.Host, Port: m.Port, - Username: m.Username, - Password: m.Password, Status: m.Status, CreatedAt: m.CreatedAt, UpdatedAt: m.UpdatedAt, } + if m.Username != nil { + out.Username = *m.Username + } + if m.Password != nil { + out.Password = *m.Password + } + return out } -func proxyModelFromService(p *service.Proxy) *proxyModel { - if p == nil { - return nil - } - return &proxyModel{ - ID: p.ID, - Name: p.Name, - Protocol: p.Protocol, - Host: p.Host, - Port: p.Port, - Username: p.Username, - Password: p.Password, - Status: p.Status, - CreatedAt: p.CreatedAt, - UpdatedAt: p.UpdatedAt, - } -} - -func 
applyProxyModelToService(proxy *service.Proxy, m *proxyModel) { - if proxy == nil || m == nil { +func applyProxyEntityToService(dst *service.Proxy, src *dbent.Proxy) { + if dst == nil || src == nil { return } - proxy.ID = m.ID - proxy.CreatedAt = m.CreatedAt - proxy.UpdatedAt = m.UpdatedAt + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt } diff --git a/backend/internal/repository/proxy_repo_integration_test.go b/backend/internal/repository/proxy_repo_integration_test.go index 3aa02176..6f88528f 100644 --- a/backend/internal/repository/proxy_repo_integration_test.go +++ b/backend/internal/repository/proxy_repo_integration_test.go @@ -4,26 +4,27 @@ package repository import ( "context" + "database/sql" "testing" "time" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type ProxyRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *proxyRepository + ctx context.Context + sqlTx *sql.Tx + repo *proxyRepository } func (s *ProxyRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewProxyRepository(s.db).(*proxyRepository) + entClient, sqlTx := testEntSQLTx(s.T()) + s.sqlTx = sqlTx + s.repo = newProxyRepositoryWithSQL(entClient, sqlTx) } func TestProxyRepoSuite(t *testing.T) { @@ -56,7 +57,14 @@ func (s *ProxyRepoSuite) TestGetByID_NotFound() { } func (s *ProxyRepoSuite) TestUpdate() { - proxy := proxyModelToService(mustCreateProxy(s.T(), s.db, &proxyModel{Name: "original"})) + proxy := &service.Proxy{ + Name: "original", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, proxy)) proxy.Name = "updated" err := s.repo.Update(s.ctx, proxy) @@ -68,7 +76,14 @@ func (s *ProxyRepoSuite) TestUpdate() { } func (s *ProxyRepoSuite) TestDelete() { - proxy := mustCreateProxy(s.T(), s.db, &proxyModel{Name: 
"to-delete"}) + proxy := &service.Proxy{ + Name: "to-delete", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, proxy)) err := s.repo.Delete(s.ctx, proxy.ID) s.Require().NoError(err, "Delete") @@ -80,8 +95,8 @@ func (s *ProxyRepoSuite) TestDelete() { // --- List / ListWithFilters --- func (s *ProxyRepoSuite) TestList() { - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p1"}) - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p2"}) + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) proxies, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "List") @@ -90,8 +105,8 @@ func (s *ProxyRepoSuite) TestList() { } func (s *ProxyRepoSuite) TestListWithFilters_Protocol() { - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p1", Protocol: "http"}) - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p2", Protocol: "socks5"}) + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "socks5", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "socks5", "", "") s.Require().NoError(err) @@ -100,8 +115,8 @@ func (s *ProxyRepoSuite) TestListWithFilters_Protocol() { } func (s *ProxyRepoSuite) TestListWithFilters_Status() { - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p1", Status: service.StatusActive}) - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p2", Status: service.StatusDisabled}) + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: 
service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusDisabled}) proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusDisabled, "") s.Require().NoError(err) @@ -110,8 +125,8 @@ func (s *ProxyRepoSuite) TestListWithFilters_Status() { } func (s *ProxyRepoSuite) TestListWithFilters_Search() { - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "production-proxy"}) - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "dev-proxy"}) + s.mustCreateProxy(&service.Proxy{Name: "production-proxy", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "dev-proxy", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "prod") s.Require().NoError(err) @@ -122,8 +137,8 @@ func (s *ProxyRepoSuite) TestListWithFilters_Search() { // --- ListActive --- func (s *ProxyRepoSuite) TestListActive() { - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "active1", Status: service.StatusActive}) - mustCreateProxy(s.T(), s.db, &proxyModel{Name: "inactive1", Status: service.StatusDisabled}) + s.mustCreateProxy(&service.Proxy{Name: "active1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "inactive1", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusDisabled}) proxies, err := s.repo.ListActive(s.ctx) s.Require().NoError(err, "ListActive") @@ -134,13 +149,14 @@ func (s *ProxyRepoSuite) TestListActive() { // --- ExistsByHostPortAuth --- func (s *ProxyRepoSuite) TestExistsByHostPortAuth() { - mustCreateProxy(s.T(), s.db, &proxyModel{ + s.mustCreateProxy(&service.Proxy{ Name: "p1", Protocol: "http", Host: "1.2.3.4", Port: 8080, Username: "user", Password: "pass", + 
Status: service.StatusActive, }) exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "1.2.3.4", 8080, "user", "pass") @@ -153,13 +169,14 @@ func (s *ProxyRepoSuite) TestExistsByHostPortAuth() { } func (s *ProxyRepoSuite) TestExistsByHostPortAuth_NoAuth() { - mustCreateProxy(s.T(), s.db, &proxyModel{ + s.mustCreateProxy(&service.Proxy{ Name: "p-noauth", Protocol: "http", Host: "5.6.7.8", Port: 8081, Username: "", Password: "", + Status: service.StatusActive, }) exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "5.6.7.8", 8081, "", "") @@ -170,10 +187,10 @@ func (s *ProxyRepoSuite) TestExistsByHostPortAuth_NoAuth() { // --- CountAccountsByProxyID --- func (s *ProxyRepoSuite) TestCountAccountsByProxyID() { - proxy := mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p-count"}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", ProxyID: &proxy.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", ProxyID: &proxy.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a3"}) // no proxy + proxy := s.mustCreateProxy(&service.Proxy{Name: "p-count", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustInsertAccount("a1", &proxy.ID) + s.mustInsertAccount("a2", &proxy.ID) + s.mustInsertAccount("a3", nil) // no proxy count, err := s.repo.CountAccountsByProxyID(s.ctx, proxy.ID) s.Require().NoError(err, "CountAccountsByProxyID") @@ -181,7 +198,7 @@ func (s *ProxyRepoSuite) TestCountAccountsByProxyID() { } func (s *ProxyRepoSuite) TestCountAccountsByProxyID_Zero() { - proxy := mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p-zero"}) + proxy := s.mustCreateProxy(&service.Proxy{Name: "p-zero", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) count, err := s.repo.CountAccountsByProxyID(s.ctx, proxy.ID) s.Require().NoError(err) @@ -191,12 +208,12 @@ func (s *ProxyRepoSuite) TestCountAccountsByProxyID_Zero() { // --- GetAccountCountsForProxies --- func (s *ProxyRepoSuite) 
TestGetAccountCountsForProxies() { - p1 := mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p1"}) - p2 := mustCreateProxy(s.T(), s.db, &proxyModel{Name: "p2"}) + p1 := s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + p2 := s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a3", ProxyID: &p2.ID}) + s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + s.mustInsertAccount("a3", &p2.ID) counts, err := s.repo.GetAccountCountsForProxies(s.ctx) s.Require().NoError(err, "GetAccountCountsForProxies") @@ -215,24 +232,13 @@ func (s *ProxyRepoSuite) TestGetAccountCountsForProxies_Empty() { func (s *ProxyRepoSuite) TestListActiveWithAccountCount() { base := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) - p1 := mustCreateProxy(s.T(), s.db, &proxyModel{ - Name: "p1", - Status: service.StatusActive, - CreatedAt: base.Add(-1 * time.Hour), - }) - p2 := mustCreateProxy(s.T(), s.db, &proxyModel{ - Name: "p2", - Status: service.StatusActive, - CreatedAt: base, - }) - mustCreateProxy(s.T(), s.db, &proxyModel{ - Name: "p3-inactive", - Status: service.StatusDisabled, - }) + p1 := s.mustCreateProxyWithTimes("p1", service.StatusActive, base.Add(-1*time.Hour)) + p2 := s.mustCreateProxyWithTimes("p2", service.StatusActive, base) + s.mustCreateProxyWithTimes("p3-inactive", service.StatusDisabled, base.Add(1*time.Hour)) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a3", ProxyID: &p2.ID}) + s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + 
s.mustInsertAccount("a3", &p2.ID) withCounts, err := s.repo.ListActiveWithAccountCount(s.ctx) s.Require().NoError(err, "ListActiveWithAccountCount") @@ -248,34 +254,16 @@ func (s *ProxyRepoSuite) TestListActiveWithAccountCount() { // --- Combined original test --- func (s *ProxyRepoSuite) TestExistsByHostPortAuth_And_AccountCountAggregates() { - p1 := mustCreateProxy(s.T(), s.db, &proxyModel{ - Name: "p1", - Protocol: "http", - Host: "1.2.3.4", - Port: 8080, - Username: "u", - Password: "p", - CreatedAt: time.Now().Add(-1 * time.Hour), - UpdatedAt: time.Now().Add(-1 * time.Hour), - }) - p2 := mustCreateProxy(s.T(), s.db, &proxyModel{ - Name: "p2", - Protocol: "http", - Host: "5.6.7.8", - Port: 8081, - Username: "", - Password: "", - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - }) + p1 := s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "1.2.3.4", Port: 8080, Username: "u", Password: "p", Status: service.StatusActive}) + p2 := s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "5.6.7.8", Port: 8081, Username: "", Password: "", Status: service.StatusActive}) exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "1.2.3.4", 8080, "u", "p") s.Require().NoError(err, "ExistsByHostPortAuth") s.Require().True(exists, "expected proxy to exist") - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a1", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a2", ProxyID: &p1.ID}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a3", ProxyID: &p2.ID}) + s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + s.mustInsertAccount("a3", &p2.ID) count1, err := s.repo.CountAccountsByProxyID(s.ctx, p1.ID) s.Require().NoError(err, "CountAccountsByProxyID") @@ -300,3 +288,42 @@ func (s *ProxyRepoSuite) TestExistsByHostPortAuth_And_AccountCountAggregates() { } } } + +func (s *ProxyRepoSuite) mustCreateProxy(p *service.Proxy) *service.Proxy { + s.T().Helper() + s.Require().NoError(s.repo.Create(s.ctx, p), 
"create proxy") + return p +} + +func (s *ProxyRepoSuite) mustCreateProxyWithTimes(name, status string, createdAt time.Time) *service.Proxy { + s.T().Helper() + + // Use the repository create for standard fields, then update timestamps via raw SQL to keep deterministic ordering. + p := s.mustCreateProxy(&service.Proxy{ + Name: name, + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: status, + }) + _, err := s.sqlTx.ExecContext(s.ctx, "UPDATE proxies SET created_at = $1, updated_at = $1 WHERE id = $2", createdAt, p.ID) + s.Require().NoError(err, "update proxy timestamps") + return p +} + +func (s *ProxyRepoSuite) mustInsertAccount(name string, proxyID *int64) { + s.T().Helper() + var pid any + if proxyID != nil { + pid = *proxyID + } + _, err := s.sqlTx.ExecContext( + s.ctx, + "INSERT INTO accounts (name, platform, type, proxy_id) VALUES ($1, $2, $3, $4)", + name, + service.PlatformAnthropic, + service.AccountTypeOAuth, + pid, + ) + s.Require().NoError(err, "insert account") +} diff --git a/backend/internal/repository/redeem_code_repo.go b/backend/internal/repository/redeem_code_repo.go index 957f2677..1429c678 100644 --- a/backend/internal/repository/redeem_code_repo.go +++ b/backend/internal/repository/redeem_code_repo.go @@ -4,25 +4,35 @@ import ( "context" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" - - "gorm.io/gorm" ) type redeemCodeRepository struct { - db *gorm.DB + client *dbent.Client } -func NewRedeemCodeRepository(db *gorm.DB) service.RedeemCodeRepository { - return &redeemCodeRepository{db: db} +func NewRedeemCodeRepository(client *dbent.Client) service.RedeemCodeRepository { + return &redeemCodeRepository{client: client} } func (r *redeemCodeRepository) Create(ctx context.Context, code *service.RedeemCode) error { - m := redeemCodeModelFromService(code) - err := 
r.db.WithContext(ctx).Create(m).Error + created, err := r.client.RedeemCode.Create(). + SetCode(code.Code). + SetType(code.Type). + SetValue(code.Value). + SetStatus(code.Status). + SetNotes(code.Notes). + SetValidityDays(code.ValidityDays). + SetNillableUsedBy(code.UsedBy). + SetNillableUsedAt(code.UsedAt). + SetNillableGroupID(code.GroupID). + Save(ctx) if err == nil { - applyRedeemCodeModelToService(code, m) + code.ID = created.ID + code.CreatedAt = created.CreatedAt } return err } @@ -31,36 +41,55 @@ func (r *redeemCodeRepository) CreateBatch(ctx context.Context, codes []service. if len(codes) == 0 { return nil } - models := make([]redeemCodeModel, 0, len(codes)) + + builders := make([]*dbent.RedeemCodeCreate, 0, len(codes)) for i := range codes { - m := redeemCodeModelFromService(&codes[i]) - if m != nil { - models = append(models, *m) - } + c := &codes[i] + b := r.client.RedeemCode.Create(). + SetCode(c.Code). + SetType(c.Type). + SetValue(c.Value). + SetStatus(c.Status). + SetNotes(c.Notes). + SetValidityDays(c.ValidityDays). + SetNillableUsedBy(c.UsedBy). + SetNillableUsedAt(c.UsedAt). + SetNillableGroupID(c.GroupID) + builders = append(builders, b) } - return r.db.WithContext(ctx).Create(&models).Error + + return r.client.RedeemCode.CreateBulk(builders...).Exec(ctx) } func (r *redeemCodeRepository) GetByID(ctx context.Context, id int64) (*service.RedeemCode, error) { - var m redeemCodeModel - err := r.db.WithContext(ctx).First(&m, id).Error + m, err := r.client.RedeemCode.Query(). + Where(redeemcode.IDEQ(id)). 
+ Only(ctx) if err != nil { - return nil, translatePersistenceError(err, service.ErrRedeemCodeNotFound, nil) + if dbent.IsNotFound(err) { + return nil, service.ErrRedeemCodeNotFound + } + return nil, err } - return redeemCodeModelToService(&m), nil + return redeemCodeEntityToService(m), nil } func (r *redeemCodeRepository) GetByCode(ctx context.Context, code string) (*service.RedeemCode, error) { - var m redeemCodeModel - err := r.db.WithContext(ctx).Where("code = ?", code).First(&m).Error + m, err := r.client.RedeemCode.Query(). + Where(redeemcode.CodeEQ(code)). + Only(ctx) if err != nil { - return nil, translatePersistenceError(err, service.ErrRedeemCodeNotFound, nil) + if dbent.IsNotFound(err) { + return nil, service.ErrRedeemCodeNotFound + } + return nil, err } - return redeemCodeModelToService(&m), nil + return redeemCodeEntityToService(m), nil } func (r *redeemCodeRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&redeemCodeModel{}, id).Error + _, err := r.client.RedeemCode.Delete().Where(redeemcode.IDEQ(id)).Exec(ctx) + return err } func (r *redeemCodeRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.RedeemCode, *pagination.PaginationResult, error) { @@ -68,61 +97,88 @@ func (r *redeemCodeRepository) List(ctx context.Context, params pagination.Pagin } func (r *redeemCodeRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]service.RedeemCode, *pagination.PaginationResult, error) { - var codes []redeemCodeModel - var total int64 - - db := r.db.WithContext(ctx).Model(&redeemCodeModel{}) + q := r.client.RedeemCode.Query() if codeType != "" { - db = db.Where("type = ?", codeType) + q = q.Where(redeemcode.TypeEQ(codeType)) } if status != "" { - db = db.Where("status = ?", status) + q = q.Where(redeemcode.StatusEQ(status)) } if search != "" { - searchPattern := "%" + search + "%" - db = db.Where("code ILIKE ?", 
searchPattern) + q = q.Where(redeemcode.CodeContainsFold(search)) } - if err := db.Count(&total).Error; err != nil { + total, err := q.Count(ctx) + if err != nil { return nil, nil, err } - if err := db.Preload("User").Preload("Group").Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&codes).Error; err != nil { + codes, err := q. + WithUser(). + WithGroup(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(redeemcode.FieldID)). + All(ctx) + if err != nil { return nil, nil, err } - outCodes := make([]service.RedeemCode, 0, len(codes)) - for i := range codes { - outCodes = append(outCodes, *redeemCodeModelToService(&codes[i])) - } + outCodes := redeemCodeEntitiesToService(codes) - return outCodes, paginationResultFromTotal(total, params), nil + return outCodes, paginationResultFromTotal(int64(total), params), nil } func (r *redeemCodeRepository) Update(ctx context.Context, code *service.RedeemCode) error { - m := redeemCodeModelFromService(code) - err := r.db.WithContext(ctx).Save(m).Error - if err == nil { - applyRedeemCodeModelToService(code, m) + up := r.client.RedeemCode.UpdateOneID(code.ID). + SetCode(code.Code). + SetType(code.Type). + SetValue(code.Value). + SetStatus(code.Status). + SetNotes(code.Notes). + SetValidityDays(code.ValidityDays) + + if code.UsedBy != nil { + up.SetUsedBy(*code.UsedBy) + } else { + up.ClearUsedBy() } - return err + if code.UsedAt != nil { + up.SetUsedAt(*code.UsedAt) + } else { + up.ClearUsedAt() + } + if code.GroupID != nil { + up.SetGroupID(*code.GroupID) + } else { + up.ClearGroupID() + } + + updated, err := up.Save(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return service.ErrRedeemCodeNotFound + } + return err + } + code.CreatedAt = updated.CreatedAt + return nil } func (r *redeemCodeRepository) Use(ctx context.Context, id, userID int64) error { now := time.Now() - result := r.db.WithContext(ctx).Model(&redeemCodeModel{}). - Where("id = ? 
AND status = ?", id, service.StatusUnused). - Updates(map[string]any{ - "status": service.StatusUsed, - "used_by": userID, - "used_at": now, - }) - if result.Error != nil { - return result.Error + affected, err := r.client.RedeemCode.Update(). + Where(redeemcode.IDEQ(id), redeemcode.StatusEQ(service.StatusUnused)). + SetStatus(service.StatusUsed). + SetUsedBy(userID). + SetUsedAt(now). + Save(ctx) + if err != nil { + return err } - if result.RowsAffected == 0 { - return service.ErrRedeemCodeUsed.WithCause(gorm.ErrRecordNotFound) + if affected == 0 { + return service.ErrRedeemCodeUsed } return nil } @@ -132,49 +188,24 @@ func (r *redeemCodeRepository) ListByUser(ctx context.Context, userID int64, lim limit = 10 } - var codes []redeemCodeModel - err := r.db.WithContext(ctx). - Preload("Group"). - Where("used_by = ?", userID). - Order("used_at DESC"). + codes, err := r.client.RedeemCode.Query(). + Where(redeemcode.UsedByEQ(userID)). + WithGroup(). + Order(dbent.Desc(redeemcode.FieldUsedAt)). Limit(limit). 
- Find(&codes).Error + All(ctx) if err != nil { return nil, err } - outCodes := make([]service.RedeemCode, 0, len(codes)) - for i := range codes { - outCodes = append(outCodes, *redeemCodeModelToService(&codes[i])) - } - return outCodes, nil + return redeemCodeEntitiesToService(codes), nil } -type redeemCodeModel struct { - ID int64 `gorm:"primaryKey"` - Code string `gorm:"uniqueIndex;size:32;not null"` - Type string `gorm:"size:20;default:balance;not null"` - Value float64 `gorm:"type:decimal(20,8);not null"` - Status string `gorm:"size:20;default:unused;not null"` - UsedBy *int64 `gorm:"index"` - UsedAt *time.Time - Notes string `gorm:"type:text"` - CreatedAt time.Time `gorm:"not null"` - - GroupID *int64 `gorm:"index"` - ValidityDays int `gorm:"default:30"` - - User *userModel `gorm:"foreignKey:UsedBy"` - Group *groupModel `gorm:"foreignKey:GroupID"` -} - -func (redeemCodeModel) TableName() string { return "redeem_codes" } - -func redeemCodeModelToService(m *redeemCodeModel) *service.RedeemCode { +func redeemCodeEntityToService(m *dbent.RedeemCode) *service.RedeemCode { if m == nil { return nil } - return &service.RedeemCode{ + out := &service.RedeemCode{ ID: m.ID, Code: m.Code, Type: m.Type, @@ -182,38 +213,26 @@ func redeemCodeModelToService(m *redeemCodeModel) *service.RedeemCode { Status: m.Status, UsedBy: m.UsedBy, UsedAt: m.UsedAt, - Notes: m.Notes, + Notes: derefString(m.Notes), CreatedAt: m.CreatedAt, GroupID: m.GroupID, ValidityDays: m.ValidityDays, - User: userModelToService(m.User), - Group: groupModelToService(m.Group), } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + return out } -func redeemCodeModelFromService(r *service.RedeemCode) *redeemCodeModel { - if r == nil { - return nil - } - return &redeemCodeModel{ - ID: r.ID, - Code: r.Code, - Type: r.Type, - Value: r.Value, - Status: r.Status, - UsedBy: r.UsedBy, - UsedAt: r.UsedAt, - 
Notes: r.Notes, - CreatedAt: r.CreatedAt, - GroupID: r.GroupID, - ValidityDays: r.ValidityDays, +func redeemCodeEntitiesToService(models []*dbent.RedeemCode) []service.RedeemCode { + out := make([]service.RedeemCode, 0, len(models)) + for i := range models { + if s := redeemCodeEntityToService(models[i]); s != nil { + out = append(out, *s) + } } -} - -func applyRedeemCodeModelToService(code *service.RedeemCode, m *redeemCodeModel) { - if code == nil || m == nil { - return - } - code.ID = m.ID - code.CreatedAt = m.CreatedAt + return out } diff --git a/backend/internal/repository/redeem_code_repo_integration_test.go b/backend/internal/repository/redeem_code_repo_integration_test.go index 50427163..ee9f79ed 100644 --- a/backend/internal/repository/redeem_code_repo_integration_test.go +++ b/backend/internal/repository/redeem_code_repo_integration_test.go @@ -7,29 +7,47 @@ import ( "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type RedeemCodeRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *redeemCodeRepository + ctx context.Context + client *dbent.Client + repo *redeemCodeRepository } func (s *RedeemCodeRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewRedeemCodeRepository(s.db).(*redeemCodeRepository) + entClient, _ := testEntSQLTx(s.T()) + s.client = entClient + s.repo = NewRedeemCodeRepository(entClient).(*redeemCodeRepository) } func TestRedeemCodeRepoSuite(t *testing.T) { suite.Run(t, new(RedeemCodeRepoSuite)) } +func (s *RedeemCodeRepoSuite) createUser(email string) *dbent.User { + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). 
+ Save(s.ctx) + s.Require().NoError(err, "create user") + return u +} + +func (s *RedeemCodeRepoSuite) createGroup(name string) *dbent.Group { + g, err := s.client.Group.Create(). + SetName(name). + Save(s.ctx) + s.Require().NoError(err, "create group") + return g +} + // --- Create / CreateBatch / GetByID / GetByCode --- func (s *RedeemCodeRepoSuite) TestCreate() { @@ -70,10 +88,19 @@ func (s *RedeemCodeRepoSuite) TestCreateBatch() { func (s *RedeemCodeRepoSuite) TestGetByID_NotFound() { _, err := s.repo.GetByID(s.ctx, 999999) s.Require().Error(err, "expected error for non-existent ID") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) } func (s *RedeemCodeRepoSuite) TestGetByCode() { - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "GET-BY-CODE", Type: service.RedeemTypeBalance}) + _, err := s.client.RedeemCode.Create(). + SetCode("GET-BY-CODE"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + Save(s.ctx) + s.Require().NoError(err, "seed redeem code") got, err := s.repo.GetByCode(s.ctx, "GET-BY-CODE") s.Require().NoError(err, "GetByCode") @@ -83,25 +110,35 @@ func (s *RedeemCodeRepoSuite) TestGetByCode() { func (s *RedeemCodeRepoSuite) TestGetByCode_NotFound() { _, err := s.repo.GetByCode(s.ctx, "NON-EXISTENT") s.Require().Error(err, "expected error for non-existent code") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) } // --- Delete --- func (s *RedeemCodeRepoSuite) TestDelete() { - code := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "TO-DELETE", Type: service.RedeemTypeBalance}) + created, err := s.client.RedeemCode.Create(). + SetCode("TO-DELETE"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). 
+ Save(s.ctx) + s.Require().NoError(err) - err := s.repo.Delete(s.ctx, code.ID) + err = s.repo.Delete(s.ctx, created.ID) s.Require().NoError(err, "Delete") - _, err = s.repo.GetByID(s.ctx, code.ID) + _, err = s.repo.GetByID(s.ctx, created.ID) s.Require().Error(err, "expected error after delete") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) } // --- List / ListWithFilters --- func (s *RedeemCodeRepoSuite) TestList() { - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "LIST-1", Type: service.RedeemTypeBalance}) - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "LIST-2", Type: service.RedeemTypeBalance}) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "LIST-1", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "LIST-2", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) codes, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "List") @@ -110,8 +147,8 @@ func (s *RedeemCodeRepoSuite) TestList() { } func (s *RedeemCodeRepoSuite) TestListWithFilters_Type() { - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "TYPE-BAL", Type: service.RedeemTypeBalance}) - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "TYPE-SUB", Type: service.RedeemTypeSubscription}) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "TYPE-BAL", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "TYPE-SUB", Type: service.RedeemTypeSubscription, Value: 0, Status: service.StatusUnused})) codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.RedeemTypeSubscription, "", "") s.Require().NoError(err) @@ -120,8 +157,8 @@ func (s *RedeemCodeRepoSuite) TestListWithFilters_Type() { } func (s 
*RedeemCodeRepoSuite) TestListWithFilters_Status() { - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "STAT-UNUSED", Type: service.RedeemTypeBalance, Status: service.StatusUnused}) - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "STAT-USED", Type: service.RedeemTypeBalance, Status: service.StatusUsed}) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "STAT-UNUSED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "STAT-USED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUsed})) codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusUsed, "") s.Require().NoError(err) @@ -130,8 +167,8 @@ func (s *RedeemCodeRepoSuite) TestListWithFilters_Status() { } func (s *RedeemCodeRepoSuite) TestListWithFilters_Search() { - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "ALPHA-CODE", Type: service.RedeemTypeBalance}) - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "BETA-CODE", Type: service.RedeemTypeBalance}) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "ALPHA-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "BETA-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "alpha") s.Require().NoError(err) @@ -140,12 +177,17 @@ func (s *RedeemCodeRepoSuite) TestListWithFilters_Search() { } func (s *RedeemCodeRepoSuite) TestListWithFilters_GroupPreload() { - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-preload"}) - mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{ - Code: "WITH-GROUP", - Type: service.RedeemTypeSubscription, - GroupID: &group.ID, - }) + group := 
s.createGroup(uniqueTestValue(s.T(), "g-preload")) + _, err := s.client.RedeemCode.Create(). + SetCode("WITH-GROUP"). + SetType(service.RedeemTypeSubscription). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetGroupID(group.ID). + Save(s.ctx) + s.Require().NoError(err) codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "") s.Require().NoError(err) @@ -157,7 +199,13 @@ func (s *RedeemCodeRepoSuite) TestListWithFilters_GroupPreload() { // --- Update --- func (s *RedeemCodeRepoSuite) TestUpdate() { - code := redeemCodeModelToService(mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "UPDATE-ME", Type: service.RedeemTypeBalance, Value: 10})) + code := &service.RedeemCode{ + Code: "UPDATE-ME", + Type: service.RedeemTypeBalance, + Value: 10, + Status: service.StatusUnused, + } + s.Require().NoError(s.repo.Create(s.ctx, code)) code.Value = 50 err := s.repo.Update(s.ctx, code) @@ -171,8 +219,9 @@ func (s *RedeemCodeRepoSuite) TestUpdate() { // --- Use --- func (s *RedeemCodeRepoSuite) TestUse() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "use@test.com"}) - code := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "USE-ME", Type: service.RedeemTypeBalance, Status: service.StatusUnused}) + user := s.createUser(uniqueTestValue(s.T(), "use") + "@example.com") + code := &service.RedeemCode{Code: "USE-ME", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused} + s.Require().NoError(s.repo.Create(s.ctx, code)) err := s.repo.Use(s.ctx, code.ID, user.ID) s.Require().NoError(err, "Use") @@ -186,8 +235,9 @@ func (s *RedeemCodeRepoSuite) TestUse() { } func (s *RedeemCodeRepoSuite) TestUse_Idempotency() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "idem@test.com"}) - code := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "IDEM-CODE", Type: service.RedeemTypeBalance, Status: service.StatusUnused}) + user := 
s.createUser(uniqueTestValue(s.T(), "idem") + "@example.com") + code := &service.RedeemCode{Code: "IDEM-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused} + s.Require().NoError(s.repo.Create(s.ctx, code)) err := s.repo.Use(s.ctx, code.ID, user.ID) s.Require().NoError(err, "Use first time") @@ -199,8 +249,9 @@ func (s *RedeemCodeRepoSuite) TestUse_Idempotency() { } func (s *RedeemCodeRepoSuite) TestUse_AlreadyUsed() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "already@test.com"}) - code := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{Code: "ALREADY-USED", Type: service.RedeemTypeBalance, Status: service.StatusUsed}) + user := s.createUser(uniqueTestValue(s.T(), "already") + "@example.com") + code := &service.RedeemCode{Code: "ALREADY-USED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUsed} + s.Require().NoError(s.repo.Create(s.ctx, code)) err := s.repo.Use(s.ctx, code.ID, user.ID) s.Require().Error(err, "expected error for already used code") @@ -210,25 +261,34 @@ func (s *RedeemCodeRepoSuite) TestUse_AlreadyUsed() { // --- ListByUser --- func (s *RedeemCodeRepoSuite) TestListByUser() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listby@test.com"}) + user := s.createUser(uniqueTestValue(s.T(), "listby") + "@example.com") base := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) - // Create codes with explicit used_at for ordering - c1 := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{ - Code: "USER-1", - Type: service.RedeemTypeBalance, - Status: service.StatusUsed, - UsedBy: &user.ID, - }) - s.db.Model(c1).Update("used_at", base) + usedAt1 := base + _, err := s.client.RedeemCode.Create(). + SetCode("USER-1"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(usedAt1). 
+ Save(s.ctx) + s.Require().NoError(err) - c2 := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{ - Code: "USER-2", - Type: service.RedeemTypeBalance, - Status: service.StatusUsed, - UsedBy: &user.ID, - }) - s.db.Model(c2).Update("used_at", base.Add(1*time.Hour)) + usedAt2 := base.Add(1 * time.Hour) + _, err = s.client.RedeemCode.Create(). + SetCode("USER-2"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(usedAt2). + Save(s.ctx) + s.Require().NoError(err) codes, err := s.repo.ListByUser(s.ctx, user.ID, 10) s.Require().NoError(err, "ListByUser") @@ -239,17 +299,21 @@ func (s *RedeemCodeRepoSuite) TestListByUser() { } func (s *RedeemCodeRepoSuite) TestListByUser_WithGroupPreload() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "grp@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-listby"}) + user := s.createUser(uniqueTestValue(s.T(), "grp") + "@example.com") + group := s.createGroup(uniqueTestValue(s.T(), "g-listby")) - c := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{ - Code: "WITH-GRP", - Type: service.RedeemTypeSubscription, - Status: service.StatusUsed, - UsedBy: &user.ID, - GroupID: &group.ID, - }) - s.db.Model(c).Update("used_at", time.Now()) + _, err := s.client.RedeemCode.Create(). + SetCode("WITH-GRP"). + SetType(service.RedeemTypeSubscription). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(time.Now()). + SetGroupID(group.ID). 
+ Save(s.ctx) + s.Require().NoError(err) codes, err := s.repo.ListByUser(s.ctx, user.ID, 10) s.Require().NoError(err) @@ -259,14 +323,18 @@ func (s *RedeemCodeRepoSuite) TestListByUser_WithGroupPreload() { } func (s *RedeemCodeRepoSuite) TestListByUser_DefaultLimit() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "deflimit@test.com"}) - c := mustCreateRedeemCode(s.T(), s.db, &redeemCodeModel{ - Code: "DEF-LIM", - Type: service.RedeemTypeBalance, - Status: service.StatusUsed, - UsedBy: &user.ID, - }) - s.db.Model(c).Update("used_at", time.Now()) + user := s.createUser(uniqueTestValue(s.T(), "deflimit") + "@example.com") + _, err := s.client.RedeemCode.Create(). + SetCode("DEF-LIM"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(time.Now()). + Save(s.ctx) + s.Require().NoError(err) // limit <= 0 should default to 10 codes, err := s.repo.ListByUser(s.ctx, user.ID, 0) @@ -277,12 +345,13 @@ func (s *RedeemCodeRepoSuite) TestListByUser_DefaultLimit() { // --- Combined original test --- func (s *RedeemCodeRepoSuite) TestCreateBatch_Filters_Use_Idempotency_ListByUser() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "rc@example.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-rc"}) + user := s.createUser(uniqueTestValue(s.T(), "rc") + "@example.com") + group := s.createGroup(uniqueTestValue(s.T(), "g-rc")) + groupID := group.ID codes := []service.RedeemCode{ - {Code: "CODEA", Type: service.RedeemTypeBalance, Value: 1, Status: service.StatusUnused, CreatedAt: time.Now()}, - {Code: "CODEB", Type: service.RedeemTypeSubscription, Value: 0, Status: service.StatusUnused, GroupID: &group.ID, ValidityDays: 7, CreatedAt: time.Now()}, + {Code: "CODEA", Type: service.RedeemTypeBalance, Value: 1, Status: service.StatusUnused, Notes: ""}, + {Code: "CODEB", Type: service.RedeemTypeSubscription, Value: 0, Status: service.StatusUnused, 
Notes: "", GroupID: &groupID, ValidityDays: 7}, } s.Require().NoError(s.repo.CreateBatch(s.ctx, codes), "CreateBatch") @@ -303,10 +372,16 @@ func (s *RedeemCodeRepoSuite) TestCreateBatch_Filters_Use_Idempotency_ListByUser codeA, err := s.repo.GetByCode(s.ctx, "CODEA") s.Require().NoError(err, "GetByCode") - // Use fixed time instead of time.Sleep for deterministic ordering - s.db.Model(&redeemCodeModel{}).Where("id = ?", codeB.ID).Update("used_at", time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)) + // Use fixed time instead of time.Sleep for deterministic ordering. + _, err = s.client.RedeemCode.UpdateOneID(codeB.ID). + SetUsedAt(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)). + Save(s.ctx) + s.Require().NoError(err) s.Require().NoError(s.repo.Use(s.ctx, codeA.ID, user.ID), "Use codeA") - s.db.Model(&redeemCodeModel{}).Where("id = ?", codeA.ID).Update("used_at", time.Date(2025, 1, 1, 13, 0, 0, 0, time.UTC)) + _, err = s.client.RedeemCode.UpdateOneID(codeA.ID). + SetUsedAt(time.Date(2025, 1, 1, 13, 0, 0, 0, time.UTC)). 
+ Save(s.ctx) + s.Require().NoError(err) used, err := s.repo.ListByUser(s.ctx, user.ID, 10) s.Require().NoError(err, "ListByUser") diff --git a/backend/internal/repository/setting_repo.go b/backend/internal/repository/setting_repo.go index 00d3776e..a4550e60 100644 --- a/backend/internal/repository/setting_repo.go +++ b/backend/internal/repository/setting_repo.go @@ -4,27 +4,33 @@ import ( "context" "time" + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/setting" "github.com/Wei-Shaw/sub2api/internal/service" - - "gorm.io/gorm" - "gorm.io/gorm/clause" ) type settingRepository struct { - db *gorm.DB + client *ent.Client } -func NewSettingRepository(db *gorm.DB) service.SettingRepository { - return &settingRepository{db: db} +func NewSettingRepository(client *ent.Client) service.SettingRepository { + return &settingRepository{client: client} } func (r *settingRepository) Get(ctx context.Context, key string) (*service.Setting, error) { - var m settingModel - err := r.db.WithContext(ctx).Where("key = ?", key).First(&m).Error + m, err := r.client.Setting.Query().Where(setting.KeyEQ(key)).Only(ctx) if err != nil { - return nil, translatePersistenceError(err, service.ErrSettingNotFound, nil) + if ent.IsNotFound(err) { + return nil, service.ErrSettingNotFound + } + return nil, err } - return settingModelToService(&m), nil + return &service.Setting{ + ID: m.ID, + Key: m.Key, + Value: m.Value, + UpdatedAt: m.UpdatedAt, + }, nil } func (r *settingRepository) GetValue(ctx context.Context, key string) (string, error) { @@ -36,21 +42,22 @@ func (r *settingRepository) GetValue(ctx context.Context, key string) (string, e } func (r *settingRepository) Set(ctx context.Context, key, value string) error { - m := &settingModel{ - Key: key, - Value: value, - UpdatedAt: time.Now(), - } - - return r.db.WithContext(ctx).Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "key"}}, - DoUpdates: clause.AssignmentColumns([]string{"value", "updated_at"}), - 
}).Create(m).Error + now := time.Now() + return r.client.Setting. + Create(). + SetKey(key). + SetValue(value). + SetUpdatedAt(now). + OnConflictColumns(setting.FieldKey). + UpdateNewValues(). + Exec(ctx) } func (r *settingRepository) GetMultiple(ctx context.Context, keys []string) (map[string]string, error) { - var settings []settingModel - err := r.db.WithContext(ctx).Where("key IN ?", keys).Find(&settings).Error + if len(keys) == 0 { + return map[string]string{}, nil + } + settings, err := r.client.Setting.Query().Where(setting.KeyIn(keys...)).All(ctx) if err != nil { return nil, err } @@ -63,27 +70,24 @@ func (r *settingRepository) GetMultiple(ctx context.Context, keys []string) (map } func (r *settingRepository) SetMultiple(ctx context.Context, settings map[string]string) error { - return r.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - for key, value := range settings { - m := &settingModel{ - Key: key, - Value: value, - UpdatedAt: time.Now(), - } - if err := tx.Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "key"}}, - DoUpdates: clause.AssignmentColumns([]string{"value", "updated_at"}), - }).Create(m).Error; err != nil { - return err - } - } + if len(settings) == 0 { return nil - }) + } + + now := time.Now() + builders := make([]*ent.SettingCreate, 0, len(settings)) + for key, value := range settings { + builders = append(builders, r.client.Setting.Create().SetKey(key).SetValue(value).SetUpdatedAt(now)) + } + return r.client.Setting. + CreateBulk(builders...). + OnConflictColumns(setting.FieldKey). + UpdateNewValues(). 
+ Exec(ctx) } func (r *settingRepository) GetAll(ctx context.Context) (map[string]string, error) { - var settings []settingModel - err := r.db.WithContext(ctx).Find(&settings).Error + settings, err := r.client.Setting.Query().All(ctx) if err != nil { return nil, err } @@ -96,26 +100,6 @@ func (r *settingRepository) GetAll(ctx context.Context) (map[string]string, erro } func (r *settingRepository) Delete(ctx context.Context, key string) error { - return r.db.WithContext(ctx).Where("key = ?", key).Delete(&settingModel{}).Error -} - -type settingModel struct { - ID int64 `gorm:"primaryKey"` - Key string `gorm:"uniqueIndex;size:100;not null"` - Value string `gorm:"type:text;not null"` - UpdatedAt time.Time `gorm:"not null"` -} - -func (settingModel) TableName() string { return "settings" } - -func settingModelToService(m *settingModel) *service.Setting { - if m == nil { - return nil - } - return &service.Setting{ - ID: m.ID, - Key: m.Key, - Value: m.Value, - UpdatedAt: m.UpdatedAt, - } + _, err := r.client.Setting.Delete().Where(setting.KeyEQ(key)).Exec(ctx) + return err } diff --git a/backend/internal/repository/setting_repo_integration_test.go b/backend/internal/repository/setting_repo_integration_test.go index e637942e..71fac0b2 100644 --- a/backend/internal/repository/setting_repo_integration_test.go +++ b/backend/internal/repository/setting_repo_integration_test.go @@ -8,20 +8,18 @@ import ( "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type SettingRepoSuite struct { suite.Suite ctx context.Context - db *gorm.DB repo *settingRepository } func (s *SettingRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewSettingRepository(s.db).(*settingRepository) + entClient, _ := testEntSQLTx(s.T()) + s.repo = NewSettingRepository(entClient).(*settingRepository) } func TestSettingRepoSuite(t *testing.T) { diff --git a/backend/internal/repository/soft_delete_ent_integration_test.go 
b/backend/internal/repository/soft_delete_ent_integration_test.go new file mode 100644 index 00000000..e1e7a35a --- /dev/null +++ b/backend/internal/repository/soft_delete_ent_integration_test.go @@ -0,0 +1,110 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "strings" + "testing" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func uniqueSoftDeleteValue(t *testing.T, prefix string) string { + t.Helper() + safeName := strings.NewReplacer("/", "_", " ", "_").Replace(t.Name()) + return fmt.Sprintf("%s-%s", prefix, safeName) +} + +func createEntUser(t *testing.T, ctx context.Context, client *dbent.Client, email string) *dbent.User { + t.Helper() + + u, err := client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + Save(ctx) + require.NoError(t, err, "create ent user") + return u +} + +func TestEntSoftDelete_ApiKey_DefaultFilterAndSkip(t *testing.T) { + ctx := context.Background() + client, _ := testEntSQLTx(t) + + u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user")+"@example.com") + + repo := NewApiKeyRepository(client) + key := &service.ApiKey{ + UserID: u.ID, + Key: uniqueSoftDeleteValue(t, "sk-soft-delete"), + Name: "soft-delete", + Status: service.StatusActive, + } + require.NoError(t, repo.Create(ctx, key), "create api key") + + require.NoError(t, repo.Delete(ctx, key.ID), "soft delete api key") + + _, err := repo.GetByID(ctx, key.ID) + require.ErrorIs(t, err, service.ErrApiKeyNotFound, "deleted rows should be hidden by default") + + _, err = client.ApiKey.Query().Where(apikey.IDEQ(key.ID)).Only(ctx) + require.Error(t, err, "default ent query should not see soft-deleted rows") + require.True(t, dbent.IsNotFound(err), "expected ent not-found after default soft delete filter") + + got, err := 
client.ApiKey.Query(). + Where(apikey.IDEQ(key.ID)). + Only(mixins.SkipSoftDelete(ctx)) + require.NoError(t, err, "SkipSoftDelete should include soft-deleted rows") + require.NotNil(t, got.DeletedAt, "deleted_at should be set after soft delete") +} + +func TestEntSoftDelete_ApiKey_DeleteIdempotent(t *testing.T) { + ctx := context.Background() + client, _ := testEntSQLTx(t) + + u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user2")+"@example.com") + + repo := NewApiKeyRepository(client) + key := &service.ApiKey{ + UserID: u.ID, + Key: uniqueSoftDeleteValue(t, "sk-soft-delete2"), + Name: "soft-delete2", + Status: service.StatusActive, + } + require.NoError(t, repo.Create(ctx, key), "create api key") + + require.NoError(t, repo.Delete(ctx, key.ID), "first delete") + require.NoError(t, repo.Delete(ctx, key.ID), "second delete should be idempotent") +} + +func TestEntSoftDelete_ApiKey_HardDeleteViaSkipSoftDelete(t *testing.T) { + ctx := context.Background() + client, _ := testEntSQLTx(t) + + u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user3")+"@example.com") + + repo := NewApiKeyRepository(client) + key := &service.ApiKey{ + UserID: u.ID, + Key: uniqueSoftDeleteValue(t, "sk-soft-delete3"), + Name: "soft-delete3", + Status: service.StatusActive, + } + require.NoError(t, repo.Create(ctx, key), "create api key") + + require.NoError(t, repo.Delete(ctx, key.ID), "soft delete api key") + + // Hard delete using SkipSoftDelete so the hook doesn't convert it to update-deleted_at. + _, err := client.ApiKey.Delete().Where(apikey.IDEQ(key.ID)).Exec(mixins.SkipSoftDelete(ctx)) + require.NoError(t, err, "hard delete") + + _, err = client.ApiKey.Query(). + Where(apikey.IDEQ(key.ID)). 
+ Only(mixins.SkipSoftDelete(ctx)) + require.True(t, dbent.IsNotFound(err), "expected row to be hard deleted") +} diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 84c44a91..246285cf 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -2,99 +2,176 @@ package repository import ( "context" + "database/sql" + "errors" + "fmt" + "strings" "time" - "github.com/Wei-Shaw/sub2api/internal/service" - + dbent "github.com/Wei-Shaw/sub2api/ent" + dbaccount "github.com/Wei-Shaw/sub2api/ent/account" + dbapikey "github.com/Wei-Shaw/sub2api/ent/apikey" + dbgroup "github.com/Wei-Shaw/sub2api/ent/group" + dbuser "github.com/Wei-Shaw/sub2api/ent/user" + dbusersub "github.com/Wei-Shaw/sub2api/ent/usersubscription" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" - - "gorm.io/gorm" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" ) +const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, billing_type, stream, duration_ms, first_token_ms, created_at" + type usageLogRepository struct { - db *gorm.DB + client *dbent.Client + sql sqlExecutor } -func NewUsageLogRepository(db *gorm.DB) service.UsageLogRepository { - return &usageLogRepository{db: db} +func NewUsageLogRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageLogRepository { + return newUsageLogRepositoryWithSQL(client, sqlDB) +} + +func newUsageLogRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageLogRepository { + return &usageLogRepository{client: client, sql: sqlq} } // 
getPerformanceStats 获取 RPM 和 TPM(近5分钟平均值,可选按用户过滤) -func (r *usageLogRepository) getPerformanceStats(ctx context.Context, userID int64) (rpm, tpm int64) { +func (r *usageLogRepository) getPerformanceStats(ctx context.Context, userID int64) (rpm, tpm int64, err error) { fiveMinutesAgo := time.Now().Add(-5 * time.Minute) - var perfStats struct { - RequestCount int64 `gorm:"column:request_count"` - TokenCount int64 `gorm:"column:token_count"` - } - - db := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as request_count, COALESCE(SUM(input_tokens + output_tokens), 0) as token_count - `). - Where("created_at >= ?", fiveMinutesAgo) - + FROM usage_logs + WHERE created_at >= $1` + args := []any{fiveMinutesAgo} if userID > 0 { - db = db.Where("user_id = ?", userID) + query += " AND user_id = $2" + args = append(args, userID) } - db.Scan(&perfStats) - // 返回5分钟平均值 - return perfStats.RequestCount / 5, perfStats.TokenCount / 5 + var requestCount int64 + var tokenCount int64 + if err := r.sql.QueryRowContext(ctx, query, args...).Scan(&requestCount, &tokenCount); err != nil { + return 0, 0, err + } + return requestCount / 5, tokenCount / 5, nil } func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) error { - m := usageLogModelFromService(log) - err := r.db.WithContext(ctx).Create(m).Error - if err == nil { - applyUsageLogModelToService(log, m) + if log == nil { + return nil } - return err + + createdAt := log.CreatedAt + if createdAt.IsZero() { + createdAt = time.Now() + } + + rateMultiplier := log.RateMultiplier + if rateMultiplier == 0 { + rateMultiplier = 1 + } + + query := ` + INSERT INTO usage_logs ( + user_id, + api_key_id, + account_id, + request_id, + model, + group_id, + subscription_id, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + cache_creation_5m_tokens, + cache_creation_1h_tokens, + input_cost, + output_cost, + cache_creation_cost, + cache_read_cost, + total_cost, 
+ actual_cost, + rate_multiplier, + billing_type, + stream, + duration_ms, + first_token_ms, + created_at + ) VALUES ( + $1, $2, $3, $4, $5, + $6, $7, + $8, $9, $10, $11, + $12, $13, + $14, $15, $16, $17, $18, $19, + $20, $21, $22, $23, $24, $25 + ) + RETURNING id, created_at + ` + + groupID := nullInt64(log.GroupID) + subscriptionID := nullInt64(log.SubscriptionID) + duration := nullInt(log.DurationMs) + firstToken := nullInt(log.FirstTokenMs) + + row := r.sql.QueryRowContext( + ctx, + query, + log.UserID, + log.ApiKeyID, + log.AccountID, + log.RequestID, + log.Model, + groupID, + subscriptionID, + log.InputTokens, + log.OutputTokens, + log.CacheCreationTokens, + log.CacheReadTokens, + log.CacheCreation5mTokens, + log.CacheCreation1hTokens, + log.InputCost, + log.OutputCost, + log.CacheCreationCost, + log.CacheReadCost, + log.TotalCost, + log.ActualCost, + rateMultiplier, + log.BillingType, + log.Stream, + duration, + firstToken, + createdAt, + ) + + if err := row.Scan(&log.ID, &log.CreatedAt); err != nil { + return err + } + log.RateMultiplier = rateMultiplier + return nil } func (r *usageLogRepository) GetByID(ctx context.Context, id int64) (*service.UsageLog, error) { - var log usageLogModel - err := r.db.WithContext(ctx).First(&log, id).Error + query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE id = $1" + log, err := scanUsageLog(r.sql.QueryRowContext(ctx, query, id)) if err != nil { - return nil, translatePersistenceError(err, service.ErrUsageLogNotFound, nil) + if errors.Is(err, sql.ErrNoRows) { + return nil, service.ErrUsageLogNotFound + } + return nil, err } - return usageLogModelToService(&log), nil + return log, nil } func (r *usageLogRepository) ListByUser(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - var total int64 - - db := r.db.WithContext(ctx).Model(&usageLogModel{}).Where("user_id = ?", userID) - - if err := 
db.Count(&total).Error; err != nil { - return nil, nil, err - } - - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&logs).Error; err != nil { - return nil, nil, err - } - - return usageLogModelsToService(logs), paginationResultFromTotal(total, params), nil + return r.listUsageLogsWithPagination(ctx, "WHERE user_id = $1", []any{userID}, params) } func (r *usageLogRepository) ListByApiKey(ctx context.Context, apiKeyID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - var total int64 - - db := r.db.WithContext(ctx).Model(&usageLogModel{}).Where("api_key_id = ?", apiKeyID) - - if err := db.Count(&total).Error; err != nil { - return nil, nil, err - } - - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&logs).Error; err != nil { - return nil, nil, err - } - - return usageLogModelsToService(logs), paginationResultFromTotal(total, params), nil + return r.listUsageLogsWithPagination(ctx, "WHERE api_key_id = $1", []any{apiKeyID}, params) } // UserStats 用户使用统计 @@ -108,19 +185,24 @@ type UserStats struct { } func (r *usageLogRepository) GetUserStats(ctx context.Context, userID int64, startTime, endTime time.Time) (*UserStats, error) { - var stats UserStats - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(actual_cost), 0) as total_cost, COALESCE(SUM(input_tokens), 0) as input_tokens, COALESCE(SUM(output_tokens), 0) as output_tokens, COALESCE(SUM(cache_read_tokens), 0) as cache_read_tokens - `). - Where("user_id = ? AND created_at >= ? AND created_at < ?", userID, startTime, endTime). 
- Scan(&stats).Error - return &stats, err + FROM usage_logs + WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 + ` + + stats := &UserStats{} + if err := r.sql.QueryRowContext(ctx, query, userID, startTime, endTime). + Scan(&stats.TotalRequests, &stats.TotalTokens, &stats.TotalCost, &stats.InputTokens, &stats.OutputTokens, &stats.CacheReadTokens); err != nil { + return nil, err + } + return stats, nil } // DashboardStats 仪表盘统计 @@ -132,78 +214,51 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS now := time.Now() // 合并用户统计查询 - var userStats struct { - TotalUsers int64 `gorm:"column:total_users"` - TodayNewUsers int64 `gorm:"column:today_new_users"` - ActiveUsers int64 `gorm:"column:active_users"` - } - if err := r.db.WithContext(ctx).Raw(` + userStatsQuery := ` SELECT COUNT(*) as total_users, - COUNT(CASE WHEN created_at >= ? THEN 1 END) as today_new_users, - (SELECT COUNT(DISTINCT user_id) FROM usage_logs WHERE created_at >= ?) as active_users + COUNT(CASE WHEN created_at >= $1 THEN 1 END) as today_new_users, + (SELECT COUNT(DISTINCT user_id) FROM usage_logs WHERE created_at >= $2) as active_users FROM users - `, today, today).Scan(&userStats).Error; err != nil { + WHERE deleted_at IS NULL + ` + if err := r.sql.QueryRowContext(ctx, userStatsQuery, today, today). + Scan(&stats.TotalUsers, &stats.TodayNewUsers, &stats.ActiveUsers); err != nil { return nil, err } - stats.TotalUsers = userStats.TotalUsers - stats.TodayNewUsers = userStats.TodayNewUsers - stats.ActiveUsers = userStats.ActiveUsers // 合并API Key统计查询 - var apiKeyStats struct { - TotalApiKeys int64 `gorm:"column:total_api_keys"` - ActiveApiKeys int64 `gorm:"column:active_api_keys"` - } - if err := r.db.WithContext(ctx).Raw(` + apiKeyStatsQuery := ` SELECT COUNT(*) as total_api_keys, - COUNT(CASE WHEN status = ? 
THEN 1 END) as active_api_keys + COUNT(CASE WHEN status = $1 THEN 1 END) as active_api_keys FROM api_keys - `, service.StatusActive).Scan(&apiKeyStats).Error; err != nil { + WHERE deleted_at IS NULL + ` + if err := r.sql.QueryRowContext(ctx, apiKeyStatsQuery, service.StatusActive). + Scan(&stats.TotalApiKeys, &stats.ActiveApiKeys); err != nil { return nil, err } - stats.TotalApiKeys = apiKeyStats.TotalApiKeys - stats.ActiveApiKeys = apiKeyStats.ActiveApiKeys // 合并账户统计查询 - var accountStats struct { - TotalAccounts int64 `gorm:"column:total_accounts"` - NormalAccounts int64 `gorm:"column:normal_accounts"` - ErrorAccounts int64 `gorm:"column:error_accounts"` - RateLimitAccounts int64 `gorm:"column:ratelimit_accounts"` - OverloadAccounts int64 `gorm:"column:overload_accounts"` - } - if err := r.db.WithContext(ctx).Raw(` + accountStatsQuery := ` SELECT COUNT(*) as total_accounts, - COUNT(CASE WHEN status = ? AND schedulable = true THEN 1 END) as normal_accounts, - COUNT(CASE WHEN status = ? THEN 1 END) as error_accounts, - COUNT(CASE WHEN rate_limited_at IS NOT NULL AND rate_limit_reset_at > ? THEN 1 END) as ratelimit_accounts, - COUNT(CASE WHEN overload_until IS NOT NULL AND overload_until > ? THEN 1 END) as overload_accounts + COUNT(CASE WHEN status = $1 AND schedulable = true THEN 1 END) as normal_accounts, + COUNT(CASE WHEN status = $2 THEN 1 END) as error_accounts, + COUNT(CASE WHEN rate_limited_at IS NOT NULL AND rate_limit_reset_at > $3 THEN 1 END) as ratelimit_accounts, + COUNT(CASE WHEN overload_until IS NOT NULL AND overload_until > $4 THEN 1 END) as overload_accounts FROM accounts - `, service.StatusActive, service.StatusError, now, now).Scan(&accountStats).Error; err != nil { + WHERE deleted_at IS NULL + ` + if err := r.sql.QueryRowContext(ctx, accountStatsQuery, service.StatusActive, service.StatusError, now, now). 
+ Scan(&stats.TotalAccounts, &stats.NormalAccounts, &stats.ErrorAccounts, &stats.RateLimitAccounts, &stats.OverloadAccounts); err != nil { return nil, err } - stats.TotalAccounts = accountStats.TotalAccounts - stats.NormalAccounts = accountStats.NormalAccounts - stats.ErrorAccounts = accountStats.ErrorAccounts - stats.RateLimitAccounts = accountStats.RateLimitAccounts - stats.OverloadAccounts = accountStats.OverloadAccounts // 累计 Token 统计 - var totalStats struct { - TotalRequests int64 `gorm:"column:total_requests"` - TotalInputTokens int64 `gorm:"column:total_input_tokens"` - TotalOutputTokens int64 `gorm:"column:total_output_tokens"` - TotalCacheCreationTokens int64 `gorm:"column:total_cache_creation_tokens"` - TotalCacheReadTokens int64 `gorm:"column:total_cache_read_tokens"` - TotalCost float64 `gorm:"column:total_cost"` - TotalActualCost float64 `gorm:"column:total_actual_cost"` - AverageDurationMs float64 `gorm:"column:avg_duration_ms"` - } - r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + totalStatsQuery := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, @@ -212,31 +267,26 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, COALESCE(AVG(duration_ms), 0) as avg_duration_ms - `). - Scan(&totalStats) - - stats.TotalRequests = totalStats.TotalRequests - stats.TotalInputTokens = totalStats.TotalInputTokens - stats.TotalOutputTokens = totalStats.TotalOutputTokens - stats.TotalCacheCreationTokens = totalStats.TotalCacheCreationTokens - stats.TotalCacheReadTokens = totalStats.TotalCacheReadTokens + FROM usage_logs + ` + if err := r.sql.QueryRowContext(ctx, totalStatsQuery). 
+ Scan( + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheCreationTokens, + &stats.TotalCacheReadTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &stats.AverageDurationMs, + ); err != nil { + return nil, err + } stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens - stats.TotalCost = totalStats.TotalCost - stats.TotalActualCost = totalStats.TotalActualCost - stats.AverageDurationMs = totalStats.AverageDurationMs // 今日 Token 统计 - var todayStats struct { - TodayRequests int64 `gorm:"column:today_requests"` - TodayInputTokens int64 `gorm:"column:today_input_tokens"` - TodayOutputTokens int64 `gorm:"column:today_output_tokens"` - TodayCacheCreationTokens int64 `gorm:"column:today_cache_creation_tokens"` - TodayCacheReadTokens int64 `gorm:"column:today_cache_read_tokens"` - TodayCost float64 `gorm:"column:today_cost"` - TodayActualCost float64 `gorm:"column:today_actual_cost"` - } - r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + todayStatsQuery := ` + SELECT COUNT(*) as today_requests, COALESCE(SUM(input_tokens), 0) as today_input_tokens, COALESCE(SUM(output_tokens), 0) as today_output_tokens, @@ -244,65 +294,48 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens, COALESCE(SUM(total_cost), 0) as today_cost, COALESCE(SUM(actual_cost), 0) as today_actual_cost - `). - Where("created_at >= ?", today). - Scan(&todayStats) - - stats.TodayRequests = todayStats.TodayRequests - stats.TodayInputTokens = todayStats.TodayInputTokens - stats.TodayOutputTokens = todayStats.TodayOutputTokens - stats.TodayCacheCreationTokens = todayStats.TodayCacheCreationTokens - stats.TodayCacheReadTokens = todayStats.TodayCacheReadTokens + FROM usage_logs + WHERE created_at >= $1 + ` + if err := r.sql.QueryRowContext(ctx, todayStatsQuery, today). 
+ Scan( + &stats.TodayRequests, + &stats.TodayInputTokens, + &stats.TodayOutputTokens, + &stats.TodayCacheCreationTokens, + &stats.TodayCacheReadTokens, + &stats.TodayCost, + &stats.TodayActualCost, + ); err != nil { + return nil, err + } stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens - stats.TodayCost = todayStats.TodayCost - stats.TodayActualCost = todayStats.TodayActualCost // 性能指标:RPM 和 TPM(最近1分钟,全局) - stats.Rpm, stats.Tpm = r.getPerformanceStats(ctx, 0) + rpm, tpm, err := r.getPerformanceStats(ctx, 0) + if err != nil { + return nil, err + } + stats.Rpm = rpm + stats.Tpm = tpm return &stats, nil } func (r *usageLogRepository) ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - var total int64 - - db := r.db.WithContext(ctx).Model(&usageLogModel{}).Where("account_id = ?", accountID) - - if err := db.Count(&total).Error; err != nil { - return nil, nil, err - } - - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&logs).Error; err != nil { - return nil, nil, err - } - - return usageLogModelsToService(logs), paginationResultFromTotal(total, params), nil + return r.listUsageLogsWithPagination(ctx, "WHERE account_id = $1", []any{accountID}, params) } func (r *usageLogRepository) ListByUserAndTimeRange(ctx context.Context, userID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - err := r.db.WithContext(ctx). - Where("user_id = ? AND created_at >= ? AND created_at < ?", userID, startTime, endTime). - Order("id DESC"). 
- Find(&logs).Error - return usageLogModelsToService(logs), nil, err + query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC" + logs, err := r.queryUsageLogs(ctx, query, userID, startTime, endTime) + return logs, nil, err } // GetUserStatsAggregated returns aggregated usage statistics for a user using database-level aggregation func (r *usageLogRepository) GetUserStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) { - var stats struct { - TotalRequests int64 `gorm:"column:total_requests"` - TotalInputTokens int64 `gorm:"column:total_input_tokens"` - TotalOutputTokens int64 `gorm:"column:total_output_tokens"` - TotalCacheTokens int64 `gorm:"column:total_cache_tokens"` - TotalCost float64 `gorm:"column:total_cost"` - TotalActualCost float64 `gorm:"column:total_actual_cost"` - AverageDurationMs float64 `gorm:"column:avg_duration_ms"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, @@ -310,40 +343,31 @@ func (r *usageLogRepository) GetUserStatsAggregated(ctx context.Context, userID COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms - `). - Where("user_id = ? AND created_at >= ? AND created_at < ?", userID, startTime, endTime). - Scan(&stats).Error + FROM usage_logs + WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 + ` - if err != nil { + var stats usagestats.UsageStats + if err := r.sql.QueryRowContext(ctx, query, userID, startTime, endTime). 
+ Scan( + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &stats.AverageDurationMs, + ); err != nil { return nil, err } - - return &usagestats.UsageStats{ - TotalRequests: stats.TotalRequests, - TotalInputTokens: stats.TotalInputTokens, - TotalOutputTokens: stats.TotalOutputTokens, - TotalCacheTokens: stats.TotalCacheTokens, - TotalTokens: stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens, - TotalCost: stats.TotalCost, - TotalActualCost: stats.TotalActualCost, - AverageDurationMs: stats.AverageDurationMs, - }, nil + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens + return &stats, nil } // GetApiKeyStatsAggregated returns aggregated usage statistics for an API key using database-level aggregation func (r *usageLogRepository) GetApiKeyStatsAggregated(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) { - var stats struct { - TotalRequests int64 `gorm:"column:total_requests"` - TotalInputTokens int64 `gorm:"column:total_input_tokens"` - TotalOutputTokens int64 `gorm:"column:total_output_tokens"` - TotalCacheTokens int64 `gorm:"column:total_cache_tokens"` - TotalCost float64 `gorm:"column:total_cost"` - TotalActualCost float64 `gorm:"column:total_actual_cost"` - AverageDurationMs float64 `gorm:"column:avg_duration_ms"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, @@ -351,113 +375,88 @@ func (r *usageLogRepository) GetApiKeyStatsAggregated(ctx context.Context, apiKe COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms - `). - Where("api_key_id = ? 
AND created_at >= ? AND created_at < ?", apiKeyID, startTime, endTime). - Scan(&stats).Error + FROM usage_logs + WHERE api_key_id = $1 AND created_at >= $2 AND created_at < $3 + ` - if err != nil { + var stats usagestats.UsageStats + if err := r.sql.QueryRowContext(ctx, query, apiKeyID, startTime, endTime). + Scan( + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &stats.AverageDurationMs, + ); err != nil { return nil, err } - - return &usagestats.UsageStats{ - TotalRequests: stats.TotalRequests, - TotalInputTokens: stats.TotalInputTokens, - TotalOutputTokens: stats.TotalOutputTokens, - TotalCacheTokens: stats.TotalCacheTokens, - TotalTokens: stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens, - TotalCost: stats.TotalCost, - TotalActualCost: stats.TotalActualCost, - AverageDurationMs: stats.AverageDurationMs, - }, nil + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens + return &stats, nil } func (r *usageLogRepository) ListByApiKeyAndTimeRange(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - err := r.db.WithContext(ctx). - Where("api_key_id = ? AND created_at >= ? AND created_at < ?", apiKeyID, startTime, endTime). - Order("id DESC"). - Find(&logs).Error - return usageLogModelsToService(logs), nil, err + query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE api_key_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC" + logs, err := r.queryUsageLogs(ctx, query, apiKeyID, startTime, endTime) + return logs, nil, err } func (r *usageLogRepository) ListByAccountAndTimeRange(ctx context.Context, accountID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - err := r.db.WithContext(ctx). 
- Where("account_id = ? AND created_at >= ? AND created_at < ?", accountID, startTime, endTime). - Order("id DESC"). - Find(&logs).Error - return usageLogModelsToService(logs), nil, err + query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC" + logs, err := r.queryUsageLogs(ctx, query, accountID, startTime, endTime) + return logs, nil, err } func (r *usageLogRepository) ListByModelAndTimeRange(ctx context.Context, modelName string, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - err := r.db.WithContext(ctx). - Where("model = ? AND created_at >= ? AND created_at < ?", modelName, startTime, endTime). - Order("id DESC"). - Find(&logs).Error - return usageLogModelsToService(logs), nil, err + query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE model = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC" + logs, err := r.queryUsageLogs(ctx, query, modelName, startTime, endTime) + return logs, nil, err } func (r *usageLogRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&usageLogModel{}, id).Error + _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_logs WHERE id = $1", id) + return err } // GetAccountTodayStats 获取账号今日统计 func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID int64) (*usagestats.AccountStats, error) { today := timezone.Today() - var stats struct { - Requests int64 `gorm:"column:requests"` - Tokens int64 `gorm:"column:tokens"` - Cost float64 `gorm:"column:cost"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, COALESCE(SUM(actual_cost), 0) as cost - `). - Where("account_id = ? AND created_at >= ?", accountID, today). 
- Scan(&stats).Error + FROM usage_logs + WHERE account_id = $1 AND created_at >= $2 + ` - if err != nil { + stats := &usagestats.AccountStats{} + if err := r.sql.QueryRowContext(ctx, query, accountID, today). + Scan(&stats.Requests, &stats.Tokens, &stats.Cost); err != nil { return nil, err } - - return &usagestats.AccountStats{ - Requests: stats.Requests, - Tokens: stats.Tokens, - Cost: stats.Cost, - }, nil + return stats, nil } // GetAccountWindowStats 获取账号时间窗口内的统计 func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountID int64, startTime time.Time) (*usagestats.AccountStats, error) { - var stats struct { - Requests int64 `gorm:"column:requests"` - Tokens int64 `gorm:"column:tokens"` - Cost float64 `gorm:"column:cost"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, COALESCE(SUM(actual_cost), 0) as cost - `). - Where("account_id = ? AND created_at >= ?", accountID, startTime). - Scan(&stats).Error + FROM usage_logs + WHERE account_id = $1 AND created_at >= $2 + ` - if err != nil { + stats := &usagestats.AccountStats{} + if err := r.sql.QueryRowContext(ctx, query, accountID, startTime). 
+ Scan(&stats.Requests, &stats.Tokens, &stats.Cost); err != nil { return nil, err } - - return &usagestats.AccountStats{ - Requests: stats.Requests, - Tokens: stats.Tokens, - Cost: stats.Cost, - }, nil + return stats, nil } // TrendDataPoint represents a single point in trend data @@ -474,28 +473,22 @@ type ApiKeyUsageTrendPoint = usagestats.ApiKeyUsageTrendPoint // GetApiKeyUsageTrend returns usage trend data grouped by API key and date func (r *usageLogRepository) GetApiKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]ApiKeyUsageTrendPoint, error) { - var results []ApiKeyUsageTrendPoint - - // Choose date format based on granularity - var dateFormat string + dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" - } else { - dateFormat = "YYYY-MM-DD" } - // Use raw SQL for complex subquery - query := ` + query := fmt.Sprintf(` WITH top_keys AS ( SELECT api_key_id FROM usage_logs - WHERE created_at >= ? AND created_at < ? + WHERE created_at >= $1 AND created_at < $2 GROUP BY api_key_id ORDER BY SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) DESC - LIMIT ? + LIMIT $3 ) SELECT - TO_CHAR(u.created_at, '` + dateFormat + `') as date, + TO_CHAR(u.created_at, '%s') as date, u.api_key_id, COALESCE(k.name, '') as key_name, COUNT(*) as requests, @@ -503,43 +496,50 @@ func (r *usageLogRepository) GetApiKeyUsageTrend(ctx context.Context, startTime, FROM usage_logs u LEFT JOIN api_keys k ON u.api_key_id = k.id WHERE u.api_key_id IN (SELECT api_key_id FROM top_keys) - AND u.created_at >= ? AND u.created_at < ? 
+ AND u.created_at >= $4 AND u.created_at < $5 GROUP BY date, u.api_key_id, k.name ORDER BY date ASC, tokens DESC - ` + `, dateFormat) - err := r.db.WithContext(ctx).Raw(query, startTime, endTime, limit, startTime, endTime).Scan(&results).Error + rows, err := r.sql.QueryContext(ctx, query, startTime, endTime, limit, startTime, endTime) if err != nil { return nil, err } + defer rows.Close() + + results := make([]ApiKeyUsageTrendPoint, 0) + for rows.Next() { + var row ApiKeyUsageTrendPoint + if err := rows.Scan(&row.Date, &row.ApiKeyID, &row.KeyName, &row.Requests, &row.Tokens); err != nil { + return nil, err + } + results = append(results, row) + } + if err := rows.Err(); err != nil { + return nil, err + } return results, nil } // GetUserUsageTrend returns usage trend data grouped by user and date func (r *usageLogRepository) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]UserUsageTrendPoint, error) { - var results []UserUsageTrendPoint - - // Choose date format based on granularity - var dateFormat string + dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" - } else { - dateFormat = "YYYY-MM-DD" } - // Use raw SQL for complex subquery - query := ` + query := fmt.Sprintf(` WITH top_users AS ( SELECT user_id FROM usage_logs - WHERE created_at >= ? AND created_at < ? + WHERE created_at >= $1 AND created_at < $2 GROUP BY user_id ORDER BY SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) DESC - LIMIT ? + LIMIT $3 ) SELECT - TO_CHAR(u.created_at, '` + dateFormat + `') as date, + TO_CHAR(u.created_at, '%s') as date, u.user_id, COALESCE(us.email, '') as email, COUNT(*) as requests, @@ -549,15 +549,28 @@ func (r *usageLogRepository) GetUserUsageTrend(ctx context.Context, startTime, e FROM usage_logs u LEFT JOIN users us ON u.user_id = us.id WHERE u.user_id IN (SELECT user_id FROM top_users) - AND u.created_at >= ? AND u.created_at < ? 
+ AND u.created_at >= $4 AND u.created_at < $5 GROUP BY date, u.user_id, us.email ORDER BY date ASC, tokens DESC - ` + `, dateFormat) - err := r.db.WithContext(ctx).Raw(query, startTime, endTime, limit, startTime, endTime).Scan(&results).Error + rows, err := r.sql.QueryContext(ctx, query, startTime, endTime, limit, startTime, endTime) if err != nil { return nil, err } + defer rows.Close() + + results := make([]UserUsageTrendPoint, 0) + for rows.Next() { + var row UserUsageTrendPoint + if err := rows.Scan(&row.Date, &row.UserID, &row.Email, &row.Requests, &row.Tokens, &row.Cost, &row.ActualCost); err != nil { + return nil, err + } + results = append(results, row) + } + if err := rows.Err(); err != nil { + return nil, err + } return results, nil } @@ -567,31 +580,22 @@ type UserDashboardStats = usagestats.UserDashboardStats // GetUserDashboardStats 获取用户专属的仪表盘统计 func (r *usageLogRepository) GetUserDashboardStats(ctx context.Context, userID int64) (*UserDashboardStats, error) { - var stats UserDashboardStats + stats := &UserDashboardStats{} today := timezone.Today() // API Key 统计 - r.db.WithContext(ctx).Model(&apiKeyModel{}). - Where("user_id = ?", userID). - Count(&stats.TotalApiKeys) - - r.db.WithContext(ctx).Model(&apiKeyModel{}). - Where("user_id = ? AND status = ?", userID, service.StatusActive). - Count(&stats.ActiveApiKeys) + if err := r.sql.QueryRowContext(ctx, "SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND deleted_at IS NULL", userID). + Scan(&stats.TotalApiKeys); err != nil { + return nil, err + } + if err := r.sql.QueryRowContext(ctx, "SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND status = $2 AND deleted_at IS NULL", userID, service.StatusActive). 
+ Scan(&stats.ActiveApiKeys); err != nil { + return nil, err + } // 累计 Token 统计 - var totalStats struct { - TotalRequests int64 `gorm:"column:total_requests"` - TotalInputTokens int64 `gorm:"column:total_input_tokens"` - TotalOutputTokens int64 `gorm:"column:total_output_tokens"` - TotalCacheCreationTokens int64 `gorm:"column:total_cache_creation_tokens"` - TotalCacheReadTokens int64 `gorm:"column:total_cache_read_tokens"` - TotalCost float64 `gorm:"column:total_cost"` - TotalActualCost float64 `gorm:"column:total_actual_cost"` - AverageDurationMs float64 `gorm:"column:avg_duration_ms"` - } - r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + totalStatsQuery := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, @@ -600,32 +604,27 @@ func (r *usageLogRepository) GetUserDashboardStats(ctx context.Context, userID i COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, COALESCE(AVG(duration_ms), 0) as avg_duration_ms - `). - Where("user_id = ?", userID). - Scan(&totalStats) - - stats.TotalRequests = totalStats.TotalRequests - stats.TotalInputTokens = totalStats.TotalInputTokens - stats.TotalOutputTokens = totalStats.TotalOutputTokens - stats.TotalCacheCreationTokens = totalStats.TotalCacheCreationTokens - stats.TotalCacheReadTokens = totalStats.TotalCacheReadTokens + FROM usage_logs + WHERE user_id = $1 + ` + if err := r.sql.QueryRowContext(ctx, totalStatsQuery, userID). 
+ Scan( + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheCreationTokens, + &stats.TotalCacheReadTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &stats.AverageDurationMs, + ); err != nil { + return nil, err + } stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens - stats.TotalCost = totalStats.TotalCost - stats.TotalActualCost = totalStats.TotalActualCost - stats.AverageDurationMs = totalStats.AverageDurationMs // 今日 Token 统计 - var todayStats struct { - TodayRequests int64 `gorm:"column:today_requests"` - TodayInputTokens int64 `gorm:"column:today_input_tokens"` - TodayOutputTokens int64 `gorm:"column:today_output_tokens"` - TodayCacheCreationTokens int64 `gorm:"column:today_cache_creation_tokens"` - TodayCacheReadTokens int64 `gorm:"column:today_cache_read_tokens"` - TodayCost float64 `gorm:"column:today_cost"` - TodayActualCost float64 `gorm:"column:today_actual_cost"` - } - r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + todayStatsQuery := ` + SELECT COUNT(*) as today_requests, COALESCE(SUM(input_tokens), 0) as today_input_tokens, COALESCE(SUM(output_tokens), 0) as today_output_tokens, @@ -633,39 +632,44 @@ func (r *usageLogRepository) GetUserDashboardStats(ctx context.Context, userID i COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens, COALESCE(SUM(total_cost), 0) as today_cost, COALESCE(SUM(actual_cost), 0) as today_actual_cost - `). - Where("user_id = ? AND created_at >= ?", userID, today). 
- Scan(&todayStats) - - stats.TodayRequests = todayStats.TodayRequests - stats.TodayInputTokens = todayStats.TodayInputTokens - stats.TodayOutputTokens = todayStats.TodayOutputTokens - stats.TodayCacheCreationTokens = todayStats.TodayCacheCreationTokens - stats.TodayCacheReadTokens = todayStats.TodayCacheReadTokens + FROM usage_logs + WHERE user_id = $1 AND created_at >= $2 + ` + if err := r.sql.QueryRowContext(ctx, todayStatsQuery, userID, today). + Scan( + &stats.TodayRequests, + &stats.TodayInputTokens, + &stats.TodayOutputTokens, + &stats.TodayCacheCreationTokens, + &stats.TodayCacheReadTokens, + &stats.TodayCost, + &stats.TodayActualCost, + ); err != nil { + return nil, err + } stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens - stats.TodayCost = todayStats.TodayCost - stats.TodayActualCost = todayStats.TodayActualCost // 性能指标:RPM 和 TPM(最近1分钟,仅统计该用户的请求) - stats.Rpm, stats.Tpm = r.getPerformanceStats(ctx, userID) + rpm, tpm, err := r.getPerformanceStats(ctx, userID) + if err != nil { + return nil, err + } + stats.Rpm = rpm + stats.Tpm = tpm - return &stats, nil + return stats, nil } // GetUserUsageTrendByUserID 获取指定用户的使用趋势 func (r *usageLogRepository) GetUserUsageTrendByUserID(ctx context.Context, userID int64, startTime, endTime time.Time, granularity string) ([]TrendDataPoint, error) { - var results []TrendDataPoint - - var dateFormat string + dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" - } else { - dateFormat = "YYYY-MM-DD" } - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` - TO_CHAR(created_at, ?) 
as date, + query := fmt.Sprintf(` + SELECT + TO_CHAR(created_at, '%s') as date, COUNT(*) as requests, COALESCE(SUM(input_tokens), 0) as input_tokens, COALESCE(SUM(output_tokens), 0) as output_tokens, @@ -673,25 +677,25 @@ func (r *usageLogRepository) GetUserUsageTrendByUserID(ctx context.Context, user COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(total_cost), 0) as cost, COALESCE(SUM(actual_cost), 0) as actual_cost - `, dateFormat). - Where("user_id = ? AND created_at >= ? AND created_at < ?", userID, startTime, endTime). - Group("date"). - Order("date ASC"). - Scan(&results).Error + FROM usage_logs + WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 + GROUP BY date + ORDER BY date ASC + `, dateFormat) + rows, err := r.sql.QueryContext(ctx, query, userID, startTime, endTime) if err != nil { return nil, err } + defer rows.Close() - return results, nil + return scanTrendRows(rows) } // GetUserModelStats 获取指定用户的模型统计 func (r *usageLogRepository) GetUserModelStats(ctx context.Context, userID int64, startTime, endTime time.Time) ([]ModelStat, error) { - var results []ModelStat - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT model, COUNT(*) as requests, COALESCE(SUM(input_tokens), 0) as input_tokens, @@ -699,17 +703,19 @@ func (r *usageLogRepository) GetUserModelStats(ctx context.Context, userID int64 COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(total_cost), 0) as cost, COALESCE(SUM(actual_cost), 0) as actual_cost - `). - Where("user_id = ? AND created_at >= ? AND created_at < ?", userID, startTime, endTime). - Group("model"). - Order("total_tokens DESC"). 
- Scan(&results).Error + FROM usage_logs + WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 + GROUP BY model + ORDER BY total_tokens DESC + ` + rows, err := r.sql.QueryContext(ctx, query, userID, startTime, endTime) if err != nil { return nil, err } + defer rows.Close() - return results, nil + return scanModelStatsRows(rows) } // UsageLogFilters represents filters for usage log queries @@ -717,52 +723,56 @@ type UsageLogFilters = usagestats.UsageLogFilters // ListWithFilters lists usage logs with optional filters (for admin) func (r *usageLogRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) { - var logs []usageLogModel - var total int64 + conditions := make([]string, 0, 8) + args := make([]any, 0, 8) - db := r.db.WithContext(ctx).Model(&usageLogModel{}) - - // Apply filters if filters.UserID > 0 { - db = db.Where("user_id = ?", filters.UserID) + conditions = append(conditions, fmt.Sprintf("user_id = $%d", len(args)+1)) + args = append(args, filters.UserID) } if filters.ApiKeyID > 0 { - db = db.Where("api_key_id = ?", filters.ApiKeyID) + conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", len(args)+1)) + args = append(args, filters.ApiKeyID) } if filters.AccountID > 0 { - db = db.Where("account_id = ?", filters.AccountID) + conditions = append(conditions, fmt.Sprintf("account_id = $%d", len(args)+1)) + args = append(args, filters.AccountID) } if filters.GroupID > 0 { - db = db.Where("group_id = ?", filters.GroupID) + conditions = append(conditions, fmt.Sprintf("group_id = $%d", len(args)+1)) + args = append(args, filters.GroupID) } if filters.Model != "" { - db = db.Where("model = ?", filters.Model) + conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1)) + args = append(args, filters.Model) } if filters.Stream != nil { - db = db.Where("stream = ?", *filters.Stream) + conditions = append(conditions, 
fmt.Sprintf("stream = $%d", len(args)+1)) + args = append(args, *filters.Stream) } if filters.BillingType != nil { - db = db.Where("billing_type = ?", *filters.BillingType) + conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1)) + args = append(args, int16(*filters.BillingType)) } if filters.StartTime != nil { - db = db.Where("created_at >= ?", *filters.StartTime) + conditions = append(conditions, fmt.Sprintf("created_at >= $%d", len(args)+1)) + args = append(args, *filters.StartTime) } if filters.EndTime != nil { - db = db.Where("created_at <= ?", *filters.EndTime) + conditions = append(conditions, fmt.Sprintf("created_at <= $%d", len(args)+1)) + args = append(args, *filters.EndTime) } - if err := db.Count(&total).Error; err != nil { + whereClause := buildWhere(conditions) + logs, page, err := r.listUsageLogsWithPagination(ctx, whereClause, args, params) + if err != nil { return nil, nil, err } - // Preload user, api_key, account, and group for display - if err := db.Preload("User").Preload("ApiKey").Preload("Account").Preload("Group"). - Offset(params.Offset()).Limit(params.Limit()). 
- Order("id DESC").Find(&logs).Error; err != nil { + if err := r.hydrateUsageLogAssociations(ctx, logs); err != nil { return nil, nil, err } - - return usageLogModelsToService(logs), paginationResultFromTotal(total, params), nil + return logs, page, nil } // UsageStats represents usage statistics @@ -773,56 +783,64 @@ type BatchUserUsageStats = usagestats.BatchUserUsageStats // GetBatchUserUsageStats gets today and total actual_cost for multiple users func (r *usageLogRepository) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*BatchUserUsageStats, error) { + result := make(map[int64]*BatchUserUsageStats) if len(userIDs) == 0 { - return make(map[int64]*BatchUserUsageStats), nil + return result, nil } - today := timezone.Today() - result := make(map[int64]*BatchUserUsageStats) - - // Initialize result map for _, id := range userIDs { result[id] = &BatchUserUsageStats{UserID: id} } - // Get total actual_cost per user - var totalStats []struct { - UserID int64 `gorm:"column:user_id"` - TotalCost float64 `gorm:"column:total_cost"` - } - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select("user_id, COALESCE(SUM(actual_cost), 0) as total_cost"). - Where("user_id IN ?", userIDs). - Group("user_id"). 
- Scan(&totalStats).Error + query := ` + SELECT user_id, COALESCE(SUM(actual_cost), 0) as total_cost + FROM usage_logs + WHERE user_id = ANY($1) + GROUP BY user_id + ` + rows, err := r.sql.QueryContext(ctx, query, pq.Array(userIDs)) if err != nil { return nil, err } - - for _, stat := range totalStats { - if s, ok := result[stat.UserID]; ok { - s.TotalActualCost = stat.TotalCost + for rows.Next() { + var userID int64 + var total float64 + if err := rows.Scan(&userID, &total); err != nil { + _ = rows.Close() + return nil, err + } + if stats, ok := result[userID]; ok { + stats.TotalActualCost = total } } - - // Get today actual_cost per user - var todayStats []struct { - UserID int64 `gorm:"column:user_id"` - TodayCost float64 `gorm:"column:today_cost"` - } - err = r.db.WithContext(ctx).Model(&usageLogModel{}). - Select("user_id, COALESCE(SUM(actual_cost), 0) as today_cost"). - Where("user_id IN ? AND created_at >= ?", userIDs, today). - Group("user_id"). - Scan(&todayStats).Error - if err != nil { + if err := rows.Close(); err != nil { return nil, err } - for _, stat := range todayStats { - if s, ok := result[stat.UserID]; ok { - s.TodayActualCost = stat.TodayCost + today := timezone.Today() + todayQuery := ` + SELECT user_id, COALESCE(SUM(actual_cost), 0) as today_cost + FROM usage_logs + WHERE user_id = ANY($1) AND created_at >= $2 + GROUP BY user_id + ` + rows, err = r.sql.QueryContext(ctx, todayQuery, pq.Array(userIDs), today) + if err != nil { + return nil, err + } + for rows.Next() { + var userID int64 + var total float64 + if err := rows.Scan(&userID, &total); err != nil { + _ = rows.Close() + return nil, err } + if stats, ok := result[userID]; ok { + stats.TodayActualCost = total + } + } + if err := rows.Close(); err != nil { + return nil, err } return result, nil @@ -833,56 +851,64 @@ type BatchApiKeyUsageStats = usagestats.BatchApiKeyUsageStats // GetBatchApiKeyUsageStats gets today and total actual_cost for multiple API keys func (r *usageLogRepository) 
GetBatchApiKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*BatchApiKeyUsageStats, error) { + result := make(map[int64]*BatchApiKeyUsageStats) if len(apiKeyIDs) == 0 { - return make(map[int64]*BatchApiKeyUsageStats), nil + return result, nil } - today := timezone.Today() - result := make(map[int64]*BatchApiKeyUsageStats) - - // Initialize result map for _, id := range apiKeyIDs { result[id] = &BatchApiKeyUsageStats{ApiKeyID: id} } - // Get total actual_cost per api key - var totalStats []struct { - ApiKeyID int64 `gorm:"column:api_key_id"` - TotalCost float64 `gorm:"column:total_cost"` - } - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select("api_key_id, COALESCE(SUM(actual_cost), 0) as total_cost"). - Where("api_key_id IN ?", apiKeyIDs). - Group("api_key_id"). - Scan(&totalStats).Error + query := ` + SELECT api_key_id, COALESCE(SUM(actual_cost), 0) as total_cost + FROM usage_logs + WHERE api_key_id = ANY($1) + GROUP BY api_key_id + ` + rows, err := r.sql.QueryContext(ctx, query, pq.Array(apiKeyIDs)) if err != nil { return nil, err } - - for _, stat := range totalStats { - if s, ok := result[stat.ApiKeyID]; ok { - s.TotalActualCost = stat.TotalCost + for rows.Next() { + var apiKeyID int64 + var total float64 + if err := rows.Scan(&apiKeyID, &total); err != nil { + _ = rows.Close() + return nil, err + } + if stats, ok := result[apiKeyID]; ok { + stats.TotalActualCost = total } } - - // Get today actual_cost per api key - var todayStats []struct { - ApiKeyID int64 `gorm:"column:api_key_id"` - TodayCost float64 `gorm:"column:today_cost"` - } - err = r.db.WithContext(ctx).Model(&usageLogModel{}). - Select("api_key_id, COALESCE(SUM(actual_cost), 0) as today_cost"). - Where("api_key_id IN ? AND created_at >= ?", apiKeyIDs, today). - Group("api_key_id"). 
- Scan(&todayStats).Error - if err != nil { + if err := rows.Close(); err != nil { return nil, err } - for _, stat := range todayStats { - if s, ok := result[stat.ApiKeyID]; ok { - s.TodayActualCost = stat.TodayCost + today := timezone.Today() + todayQuery := ` + SELECT api_key_id, COALESCE(SUM(actual_cost), 0) as today_cost + FROM usage_logs + WHERE api_key_id = ANY($1) AND created_at >= $2 + GROUP BY api_key_id + ` + rows, err = r.sql.QueryContext(ctx, todayQuery, pq.Array(apiKeyIDs), today) + if err != nil { + return nil, err + } + for rows.Next() { + var apiKeyID int64 + var total float64 + if err := rows.Scan(&apiKeyID, &total); err != nil { + _ = rows.Close() + return nil, err } + if stats, ok := result[apiKeyID]; ok { + stats.TodayActualCost = total + } + } + if err := rows.Close(); err != nil { + return nil, err } return result, nil @@ -890,18 +916,14 @@ func (r *usageLogRepository) GetBatchApiKeyUsageStats(ctx context.Context, apiKe // GetUsageTrendWithFilters returns usage trend data with optional user/api_key filters func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]TrendDataPoint, error) { - var results []TrendDataPoint - - var dateFormat string + dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" - } else { - dateFormat = "YYYY-MM-DD" } - db := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` - TO_CHAR(created_at, ?) 
as date, + query := fmt.Sprintf(` + SELECT + TO_CHAR(created_at, '%s') as date, COUNT(*) as requests, COALESCE(SUM(input_tokens), 0) as input_tokens, COALESCE(SUM(output_tokens), 0) as output_tokens, @@ -909,30 +931,34 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(total_cost), 0) as cost, COALESCE(SUM(actual_cost), 0) as actual_cost - `, dateFormat). - Where("created_at >= ? AND created_at < ?", startTime, endTime) + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + `, dateFormat) + args := []any{startTime, endTime} if userID > 0 { - db = db.Where("user_id = ?", userID) + query += fmt.Sprintf(" AND user_id = $%d", len(args)+1) + args = append(args, userID) } if apiKeyID > 0 { - db = db.Where("api_key_id = ?", apiKeyID) + query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1) + args = append(args, apiKeyID) } + query += " GROUP BY date ORDER BY date ASC" - err := db.Group("date").Order("date ASC").Scan(&results).Error + rows, err := r.sql.QueryContext(ctx, query, args...) if err != nil { return nil, err } + defer rows.Close() - return results, nil + return scanTrendRows(rows) } // GetModelStatsWithFilters returns model statistics with optional user/api_key filters func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) ([]ModelStat, error) { - var results []ModelStat - - db := r.db.WithContext(ctx).Model(&usageLogModel{}). 
- Select(` + query := ` + SELECT model, COUNT(*) as requests, COALESCE(SUM(input_tokens), 0) as input_tokens, @@ -940,41 +966,38 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(total_cost), 0) as cost, COALESCE(SUM(actual_cost), 0) as actual_cost - `). - Where("created_at >= ? AND created_at < ?", startTime, endTime) + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + ` + args := []any{startTime, endTime} if userID > 0 { - db = db.Where("user_id = ?", userID) + query += fmt.Sprintf(" AND user_id = $%d", len(args)+1) + args = append(args, userID) } if apiKeyID > 0 { - db = db.Where("api_key_id = ?", apiKeyID) + query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1) + args = append(args, apiKeyID) } if accountID > 0 { - db = db.Where("account_id = ?", accountID) + query += fmt.Sprintf(" AND account_id = $%d", len(args)+1) + args = append(args, accountID) } + query += " GROUP BY model ORDER BY total_tokens DESC" - err := db.Group("model").Order("total_tokens DESC").Scan(&results).Error + rows, err := r.sql.QueryContext(ctx, query, args...) 
if err != nil { return nil, err } + defer rows.Close() - return results, nil + return scanModelStatsRows(rows) } // GetGlobalStats gets usage statistics for all users within a time range func (r *usageLogRepository) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*UsageStats, error) { - var stats struct { - TotalRequests int64 `gorm:"column:total_requests"` - TotalInputTokens int64 `gorm:"column:total_input_tokens"` - TotalOutputTokens int64 `gorm:"column:total_output_tokens"` - TotalCacheTokens int64 `gorm:"column:total_cache_tokens"` - TotalCost float64 `gorm:"column:total_cost"` - TotalActualCost float64 `gorm:"column:total_actual_cost"` - AverageDurationMs float64 `gorm:"column:avg_duration_ms"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT COUNT(*) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, @@ -982,24 +1005,25 @@ func (r *usageLogRepository) GetGlobalStats(ctx context.Context, startTime, endT COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, COALESCE(AVG(duration_ms), 0) as avg_duration_ms - `). - Where("created_at >= ? AND created_at <= ?", startTime, endTime). - Scan(&stats).Error + FROM usage_logs + WHERE created_at >= $1 AND created_at <= $2 + ` - if err != nil { + stats := &UsageStats{} + if err := r.sql.QueryRowContext(ctx, query, startTime, endTime). 
+ Scan( + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &stats.AverageDurationMs, + ); err != nil { return nil, err } - - return &UsageStats{ - TotalRequests: stats.TotalRequests, - TotalInputTokens: stats.TotalInputTokens, - TotalOutputTokens: stats.TotalOutputTokens, - TotalCacheTokens: stats.TotalCacheTokens, - TotalTokens: stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens, - TotalCost: stats.TotalCost, - TotalActualCost: stats.TotalActualCost, - AverageDurationMs: stats.AverageDurationMs, - }, nil + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens + return stats, nil } // AccountUsageHistory represents daily usage history for an account @@ -1018,48 +1042,49 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID daysCount = 30 } - // Get daily history - var historyResults []struct { - Date string `gorm:"column:date"` - Requests int64 `gorm:"column:requests"` - Tokens int64 `gorm:"column:tokens"` - Cost float64 `gorm:"column:cost"` - ActualCost float64 `gorm:"column:actual_cost"` - } - - err := r.db.WithContext(ctx).Model(&usageLogModel{}). - Select(` + query := ` + SELECT TO_CHAR(created_at, 'YYYY-MM-DD') as date, COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, COALESCE(SUM(total_cost), 0) as cost, COALESCE(SUM(actual_cost), 0) as actual_cost - `). - Where("account_id = ? AND created_at >= ? AND created_at < ?", accountID, startTime, endTime). - Group("date"). - Order("date ASC"). 
- Scan(&historyResults).Error + FROM usage_logs + WHERE account_id = $1 AND created_at >= $2 AND created_at < $3 + GROUP BY date + ORDER BY date ASC + ` + + rows, err := r.sql.QueryContext(ctx, query, accountID, startTime, endTime) if err != nil { return nil, err } + defer rows.Close() - // Build history with labels - history := make([]AccountUsageHistory, 0, len(historyResults)) - for _, h := range historyResults { - // Parse date to get label (MM/DD) - t, _ := time.Parse("2006-01-02", h.Date) - label := t.Format("01/02") + history := make([]AccountUsageHistory, 0) + for rows.Next() { + var date string + var requests int64 + var tokens int64 + var cost float64 + var actualCost float64 + if err := rows.Scan(&date, &requests, &tokens, &cost, &actualCost); err != nil { + return nil, err + } + t, _ := time.Parse("2006-01-02", date) history = append(history, AccountUsageHistory{ - Date: h.Date, - Label: label, - Requests: h.Requests, - Tokens: h.Tokens, - Cost: h.Cost, - ActualCost: h.ActualCost, + Date: date, + Label: t.Format("01/02"), + Requests: requests, + Tokens: tokens, + Cost: cost, + ActualCost: actualCost, }) } + if err := rows.Err(); err != nil { + return nil, err + } - // Calculate summary var totalActualCost, totalStandardCost float64 var totalRequests, totalTokens int64 var highestCostDay, highestRequestDay *AccountUsageHistory @@ -1084,14 +1109,11 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID actualDaysUsed = 1 } - // Get average duration - var avgDuration struct { - AvgDurationMs float64 `gorm:"column:avg_duration_ms"` + avgQuery := "SELECT COALESCE(AVG(duration_ms), 0) as avg_duration_ms FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3" + var avgDuration float64 + if err := r.sql.QueryRowContext(ctx, avgQuery, accountID, startTime, endTime).Scan(&avgDuration); err != nil { + return nil, err } - r.db.WithContext(ctx).Model(&usageLogModel{}). 
- Select("COALESCE(AVG(duration_ms), 0) as avg_duration_ms"). - Where("account_id = ? AND created_at >= ? AND created_at < ?", accountID, startTime, endTime). - Scan(&avgDuration) summary := AccountUsageSummary{ Days: daysCount, @@ -1103,10 +1125,9 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID AvgDailyCost: totalActualCost / float64(actualDaysUsed), AvgDailyRequests: float64(totalRequests) / float64(actualDaysUsed), AvgDailyTokens: float64(totalTokens) / float64(actualDaysUsed), - AvgDurationMs: avgDuration.AvgDurationMs, + AvgDurationMs: avgDuration, } - // Set today's stats todayStr := timezone.Now().Format("2006-01-02") for i := range history { if history[i].Date == todayStr { @@ -1125,7 +1146,6 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID } } - // Set highest cost day if highestCostDay != nil { summary.HighestCostDay = &struct { Date string `json:"date"` @@ -1140,7 +1160,6 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID } } - // Set highest request day if highestRequestDay != nil { summary.HighestRequestDay = &struct { Date string `json:"date"` @@ -1155,7 +1174,6 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID } } - // Get model statistics using the unified method models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID) if err != nil { models = []ModelStat{} @@ -1168,136 +1186,390 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID }, nil } -type usageLogModel struct { - ID int64 `gorm:"primaryKey"` - UserID int64 `gorm:"index;not null"` - ApiKeyID int64 `gorm:"index;not null"` - AccountID int64 `gorm:"index;not null"` - RequestID string `gorm:"size:64"` - Model string `gorm:"size:100;index;not null"` +func (r *usageLogRepository) listUsageLogsWithPagination(ctx context.Context, whereClause string, args []any, params pagination.PaginationParams) 
([]service.UsageLog, *pagination.PaginationResult, error) { + countQuery := "SELECT COUNT(*) FROM usage_logs " + whereClause + var total int64 + if err := r.sql.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil { + return nil, nil, err + } - GroupID *int64 `gorm:"index"` - SubscriptionID *int64 `gorm:"index"` - - InputTokens int `gorm:"default:0;not null"` - OutputTokens int `gorm:"default:0;not null"` - CacheCreationTokens int `gorm:"default:0;not null"` - CacheReadTokens int `gorm:"default:0;not null"` - - CacheCreation5mTokens int `gorm:"default:0;not null"` - CacheCreation1hTokens int `gorm:"default:0;not null"` - - InputCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - OutputCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - CacheCreationCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - CacheReadCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - TotalCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - ActualCost float64 `gorm:"type:decimal(20,10);default:0;not null"` - RateMultiplier float64 `gorm:"type:decimal(10,4);default:1;not null"` - - BillingType int8 `gorm:"type:smallint;default:0;not null"` - Stream bool `gorm:"default:false;not null"` - DurationMs *int - FirstTokenMs *int - - CreatedAt time.Time `gorm:"index;not null"` - - User *userModel `gorm:"foreignKey:UserID"` - ApiKey *apiKeyModel `gorm:"foreignKey:ApiKeyID"` - Account *accountModel `gorm:"foreignKey:AccountID"` - Group *groupModel `gorm:"foreignKey:GroupID"` - Subscription *userSubscriptionModel `gorm:"foreignKey:SubscriptionID"` + limitPos := len(args) + 1 + offsetPos := len(args) + 2 + listArgs := append(append([]any{}, args...), params.Limit(), params.Offset()) + query := fmt.Sprintf("SELECT %s FROM usage_logs %s ORDER BY id DESC LIMIT $%d OFFSET $%d", usageLogSelectColumns, whereClause, limitPos, offsetPos) + logs, err := r.queryUsageLogs(ctx, query, listArgs...) 
+ if err != nil { + return nil, nil, err + } + return logs, paginationResultFromTotal(total, params), nil } -func (usageLogModel) TableName() string { return "usage_logs" } +func (r *usageLogRepository) queryUsageLogs(ctx context.Context, query string, args ...any) ([]service.UsageLog, error) { + rows, err := r.sql.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() -func usageLogModelToService(m *usageLogModel) *service.UsageLog { - if m == nil { + logs := make([]service.UsageLog, 0) + for rows.Next() { + log, err := scanUsageLog(rows) + if err != nil { + return nil, err + } + logs = append(logs, *log) + } + if err := rows.Err(); err != nil { + return nil, err + } + return logs, nil +} + +func (r *usageLogRepository) hydrateUsageLogAssociations(ctx context.Context, logs []service.UsageLog) error { + // 关联数据使用 Ent 批量加载,避免把复杂 SQL 继续膨胀。 + if len(logs) == 0 { return nil } - return &service.UsageLog{ - ID: m.ID, - UserID: m.UserID, - ApiKeyID: m.ApiKeyID, - AccountID: m.AccountID, - RequestID: m.RequestID, - Model: m.Model, - GroupID: m.GroupID, - SubscriptionID: m.SubscriptionID, - InputTokens: m.InputTokens, - OutputTokens: m.OutputTokens, - CacheCreationTokens: m.CacheCreationTokens, - CacheReadTokens: m.CacheReadTokens, - CacheCreation5mTokens: m.CacheCreation5mTokens, - CacheCreation1hTokens: m.CacheCreation1hTokens, - InputCost: m.InputCost, - OutputCost: m.OutputCost, - CacheCreationCost: m.CacheCreationCost, - CacheReadCost: m.CacheReadCost, - TotalCost: m.TotalCost, - ActualCost: m.ActualCost, - RateMultiplier: m.RateMultiplier, - BillingType: m.BillingType, - Stream: m.Stream, - DurationMs: m.DurationMs, - FirstTokenMs: m.FirstTokenMs, - CreatedAt: m.CreatedAt, - User: userModelToService(m.User), - ApiKey: apiKeyModelToService(m.ApiKey), - Account: accountModelToService(m.Account), - Group: groupModelToService(m.Group), - Subscription: userSubscriptionModelToService(m.Subscription), + + ids := 
collectUsageLogIDs(logs) + users, err := r.loadUsers(ctx, ids.userIDs) + if err != nil { + return err + } + apiKeys, err := r.loadApiKeys(ctx, ids.apiKeyIDs) + if err != nil { + return err + } + accounts, err := r.loadAccounts(ctx, ids.accountIDs) + if err != nil { + return err + } + groups, err := r.loadGroups(ctx, ids.groupIDs) + if err != nil { + return err + } + subs, err := r.loadSubscriptions(ctx, ids.subscriptionIDs) + if err != nil { + return err + } + + for i := range logs { + if user, ok := users[logs[i].UserID]; ok { + logs[i].User = user + } + if key, ok := apiKeys[logs[i].ApiKeyID]; ok { + logs[i].ApiKey = key + } + if acc, ok := accounts[logs[i].AccountID]; ok { + logs[i].Account = acc + } + if logs[i].GroupID != nil { + if group, ok := groups[*logs[i].GroupID]; ok { + logs[i].Group = group + } + } + if logs[i].SubscriptionID != nil { + if sub, ok := subs[*logs[i].SubscriptionID]; ok { + logs[i].Subscription = sub + } + } + } + return nil +} + +type usageLogIDs struct { + userIDs []int64 + apiKeyIDs []int64 + accountIDs []int64 + groupIDs []int64 + subscriptionIDs []int64 +} + +func collectUsageLogIDs(logs []service.UsageLog) usageLogIDs { + idSet := func() map[int64]struct{} { return make(map[int64]struct{}) } + + userIDs := idSet() + apiKeyIDs := idSet() + accountIDs := idSet() + groupIDs := idSet() + subscriptionIDs := idSet() + + for i := range logs { + userIDs[logs[i].UserID] = struct{}{} + apiKeyIDs[logs[i].ApiKeyID] = struct{}{} + accountIDs[logs[i].AccountID] = struct{}{} + if logs[i].GroupID != nil { + groupIDs[*logs[i].GroupID] = struct{}{} + } + if logs[i].SubscriptionID != nil { + subscriptionIDs[*logs[i].SubscriptionID] = struct{}{} + } + } + + return usageLogIDs{ + userIDs: setToSlice(userIDs), + apiKeyIDs: setToSlice(apiKeyIDs), + accountIDs: setToSlice(accountIDs), + groupIDs: setToSlice(groupIDs), + subscriptionIDs: setToSlice(subscriptionIDs), } } -func usageLogModelsToService(models []usageLogModel) []service.UsageLog { - out := 
make([]service.UsageLog, 0, len(models)) - for i := range models { - if s := usageLogModelToService(&models[i]); s != nil { - out = append(out, *s) +func (r *usageLogRepository) loadUsers(ctx context.Context, ids []int64) (map[int64]*service.User, error) { + out := make(map[int64]*service.User) + if len(ids) == 0 { + return out, nil + } + models, err := r.client.User.Query().Where(dbuser.IDIn(ids...)).All(ctx) + if err != nil { + return nil, err + } + for _, m := range models { + out[m.ID] = userEntityToService(m) + } + return out, nil +} + +func (r *usageLogRepository) loadApiKeys(ctx context.Context, ids []int64) (map[int64]*service.ApiKey, error) { + out := make(map[int64]*service.ApiKey) + if len(ids) == 0 { + return out, nil + } + models, err := r.client.ApiKey.Query().Where(dbapikey.IDIn(ids...)).All(ctx) + if err != nil { + return nil, err + } + for _, m := range models { + out[m.ID] = apiKeyEntityToService(m) + } + return out, nil +} + +func (r *usageLogRepository) loadAccounts(ctx context.Context, ids []int64) (map[int64]*service.Account, error) { + out := make(map[int64]*service.Account) + if len(ids) == 0 { + return out, nil + } + models, err := r.client.Account.Query().Where(dbaccount.IDIn(ids...)).All(ctx) + if err != nil { + return nil, err + } + for _, m := range models { + out[m.ID] = accountEntityToService(m) + } + return out, nil +} + +func (r *usageLogRepository) loadGroups(ctx context.Context, ids []int64) (map[int64]*service.Group, error) { + out := make(map[int64]*service.Group) + if len(ids) == 0 { + return out, nil + } + models, err := r.client.Group.Query().Where(dbgroup.IDIn(ids...)).All(ctx) + if err != nil { + return nil, err + } + for _, m := range models { + out[m.ID] = groupEntityToService(m) + } + return out, nil +} + +func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64) (map[int64]*service.UserSubscription, error) { + out := make(map[int64]*service.UserSubscription) + if len(ids) == 0 { + return out, nil 
+ } + models, err := r.client.UserSubscription.Query().Where(dbusersub.IDIn(ids...)).All(ctx) + if err != nil { + return nil, err + } + for _, m := range models { + out[m.ID] = userSubscriptionEntityToService(m) + } + return out, nil +} + +func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) { + var ( + id int64 + userID int64 + apiKeyID int64 + accountID int64 + requestID sql.NullString + model string + groupID sql.NullInt64 + subscriptionID sql.NullInt64 + inputTokens int + outputTokens int + cacheCreationTokens int + cacheReadTokens int + cacheCreation5m int + cacheCreation1h int + inputCost float64 + outputCost float64 + cacheCreationCost float64 + cacheReadCost float64 + totalCost float64 + actualCost float64 + rateMultiplier float64 + billingType int16 + stream bool + durationMs sql.NullInt64 + firstTokenMs sql.NullInt64 + createdAt time.Time + ) + + if err := scanner.Scan( + &id, + &userID, + &apiKeyID, + &accountID, + &requestID, + &model, + &groupID, + &subscriptionID, + &inputTokens, + &outputTokens, + &cacheCreationTokens, + &cacheReadTokens, + &cacheCreation5m, + &cacheCreation1h, + &inputCost, + &outputCost, + &cacheCreationCost, + &cacheReadCost, + &totalCost, + &actualCost, + &rateMultiplier, + &billingType, + &stream, + &durationMs, + &firstTokenMs, + &createdAt, + ); err != nil { + return nil, err + } + + log := &service.UsageLog{ + ID: id, + UserID: userID, + ApiKeyID: apiKeyID, + AccountID: accountID, + Model: model, + InputTokens: inputTokens, + OutputTokens: outputTokens, + CacheCreationTokens: cacheCreationTokens, + CacheReadTokens: cacheReadTokens, + CacheCreation5mTokens: cacheCreation5m, + CacheCreation1hTokens: cacheCreation1h, + InputCost: inputCost, + OutputCost: outputCost, + CacheCreationCost: cacheCreationCost, + CacheReadCost: cacheReadCost, + TotalCost: totalCost, + ActualCost: actualCost, + RateMultiplier: rateMultiplier, + BillingType: int8(billingType), + Stream: stream, + CreatedAt: createdAt, + 
} + + if requestID.Valid { + log.RequestID = requestID.String + } + if groupID.Valid { + value := groupID.Int64 + log.GroupID = &value + } + if subscriptionID.Valid { + value := subscriptionID.Int64 + log.SubscriptionID = &value + } + if durationMs.Valid { + value := int(durationMs.Int64) + log.DurationMs = &value + } + if firstTokenMs.Valid { + value := int(firstTokenMs.Int64) + log.FirstTokenMs = &value + } + + return log, nil +} + +func scanTrendRows(rows *sql.Rows) ([]TrendDataPoint, error) { + results := make([]TrendDataPoint, 0) + for rows.Next() { + var row TrendDataPoint + if err := rows.Scan( + &row.Date, + &row.Requests, + &row.InputTokens, + &row.OutputTokens, + &row.CacheTokens, + &row.TotalTokens, + &row.Cost, + &row.ActualCost, + ); err != nil { + return nil, err } + results = append(results, row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return results, nil +} + +func scanModelStatsRows(rows *sql.Rows) ([]ModelStat, error) { + results := make([]ModelStat, 0) + for rows.Next() { + var row ModelStat + if err := rows.Scan( + &row.Model, + &row.Requests, + &row.InputTokens, + &row.OutputTokens, + &row.TotalTokens, + &row.Cost, + &row.ActualCost, + ); err != nil { + return nil, err + } + results = append(results, row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return results, nil +} + +func buildWhere(conditions []string) string { + if len(conditions) == 0 { + return "" + } + return "WHERE " + strings.Join(conditions, " AND ") +} + +func nullInt64(v *int64) sql.NullInt64 { + if v == nil { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: *v, Valid: true} +} + +func nullInt(v *int) sql.NullInt64 { + if v == nil { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: int64(*v), Valid: true} +} + +func setToSlice(set map[int64]struct{}) []int64 { + out := make([]int64, 0, len(set)) + for id := range set { + out = append(out, id) } return out } - -func usageLogModelFromService(log *service.UsageLog) 
*usageLogModel { - if log == nil { - return nil - } - return &usageLogModel{ - ID: log.ID, - UserID: log.UserID, - ApiKeyID: log.ApiKeyID, - AccountID: log.AccountID, - RequestID: log.RequestID, - Model: log.Model, - GroupID: log.GroupID, - SubscriptionID: log.SubscriptionID, - InputTokens: log.InputTokens, - OutputTokens: log.OutputTokens, - CacheCreationTokens: log.CacheCreationTokens, - CacheReadTokens: log.CacheReadTokens, - CacheCreation5mTokens: log.CacheCreation5mTokens, - CacheCreation1hTokens: log.CacheCreation1hTokens, - InputCost: log.InputCost, - OutputCost: log.OutputCost, - CacheCreationCost: log.CacheCreationCost, - CacheReadCost: log.CacheReadCost, - TotalCost: log.TotalCost, - ActualCost: log.ActualCost, - RateMultiplier: log.RateMultiplier, - BillingType: log.BillingType, - Stream: log.Stream, - DurationMs: log.DurationMs, - FirstTokenMs: log.FirstTokenMs, - CreatedAt: log.CreatedAt, - } -} - -func applyUsageLogModelToService(log *service.UsageLog, m *usageLogModel) { - if log == nil || m == nil { - return - } - log.ID = m.ID - log.CreatedAt = m.CreatedAt -} diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 4533a0ab..4ef5fa56 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ b/backend/internal/repository/usage_log_repo_integration_test.go @@ -4,35 +4,39 @@ package repository import ( "context" + "database/sql" "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type UsageLogRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *usageLogRepository + ctx context.Context + tx *sql.Tx + client *dbent.Client + repo *usageLogRepository } func (s 
*UsageLogRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewUsageLogRepository(s.db).(*usageLogRepository) + client, tx := testEntSQLTx(s.T()) + s.client = client + s.tx = tx + s.repo = newUsageLogRepositoryWithSQL(client, tx) } func TestUsageLogRepoSuite(t *testing.T) { suite.Run(t, new(UsageLogRepoSuite)) } -func (s *UsageLogRepoSuite) createUsageLog(user *userModel, apiKey *apiKeyModel, account *accountModel, inputTokens, outputTokens int, cost float64, createdAt time.Time) *service.UsageLog { +func (s *UsageLogRepoSuite) createUsageLog(user *service.User, apiKey *service.ApiKey, account *service.Account, inputTokens, outputTokens int, cost float64, createdAt time.Time) *service.UsageLog { log := &service.UsageLog{ UserID: user.ID, ApiKeyID: apiKey.ID, @@ -51,9 +55,9 @@ func (s *UsageLogRepoSuite) createUsageLog(user *userModel, apiKey *apiKeyModel, // --- Create / GetByID --- func (s *UsageLogRepoSuite) TestCreate() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "create@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-create", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-create"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "create@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-create", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-create"}) log := &service.UsageLog{ UserID: user.ID, @@ -72,9 +76,9 @@ func (s *UsageLogRepoSuite) TestCreate() { } func (s *UsageLogRepoSuite) TestGetByID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "getbyid@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-getbyid", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-getbyid"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: 
"getbyid@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-getbyid", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid"}) log := s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -92,9 +96,9 @@ func (s *UsageLogRepoSuite) TestGetByID_NotFound() { // --- Delete --- func (s *UsageLogRepoSuite) TestDelete() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "delete@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-delete", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-delete"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "delete@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-delete", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-delete"}) log := s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -108,9 +112,9 @@ func (s *UsageLogRepoSuite) TestDelete() { // --- ListByUser --- func (s *UsageLogRepoSuite) TestListByUser() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listbyuser@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-listbyuser", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-listbyuser"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyuser@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-listbyuser", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyuser"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) s.createUsageLog(user, apiKey, account, 15, 25, 0.6, time.Now()) @@ -124,9 +128,9 @@ func (s *UsageLogRepoSuite) TestListByUser() { // --- ListByApiKey --- func (s *UsageLogRepoSuite) TestListByApiKey() { - user := 
mustCreateUser(s.T(), s.db, &userModel{Email: "listbyapikey@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-listbyapikey", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-listbyapikey"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyapikey@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-listbyapikey", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyapikey"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) s.createUsageLog(user, apiKey, account, 15, 25, 0.6, time.Now()) @@ -140,9 +144,9 @@ func (s *UsageLogRepoSuite) TestListByApiKey() { // --- ListByAccount --- func (s *UsageLogRepoSuite) TestListByAccount() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listbyaccount@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-listbyaccount", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-listbyaccount"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyaccount@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-listbyaccount", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyaccount"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -155,9 +159,9 @@ func (s *UsageLogRepoSuite) TestListByAccount() { // --- GetUserStats --- func (s *UsageLogRepoSuite) TestGetUserStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "userstats@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-userstats", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-userstats"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "userstats@test.com"}) + apiKey := 
mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-userstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-userstats"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -175,9 +179,9 @@ func (s *UsageLogRepoSuite) TestGetUserStats() { // --- ListWithFilters --- func (s *UsageLogRepoSuite) TestListWithFilters() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "filters@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-filters", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-filters"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filters@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-filters", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filters"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -194,26 +198,26 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { now := time.Now() todayStart := timezone.Today() - userToday := mustCreateUser(s.T(), s.db, &userModel{ + userToday := mustCreateUser(s.T(), s.client, &service.User{ Email: "today@example.com", CreatedAt: maxTime(todayStart.Add(10*time.Second), now.Add(-10*time.Second)), UpdatedAt: now, }) - userOld := mustCreateUser(s.T(), s.db, &userModel{ + userOld := mustCreateUser(s.T(), s.client, &service.User{ Email: "old@example.com", CreatedAt: todayStart.Add(-24 * time.Hour), UpdatedAt: todayStart.Add(-24 * time.Hour), }) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-ul"}) - apiKey1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: userToday.ID, Key: "sk-ul-1", Name: "ul1"}) - mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: userOld.ID, Key: "sk-ul-2", Name: "ul2", Status: service.StatusDisabled}) + group := 
mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-ul"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: userToday.ID, Key: "sk-ul-1", Name: "ul1"}) + mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: userOld.ID, Key: "sk-ul-2", Name: "ul2", Status: service.StatusDisabled}) resetAt := now.Add(10 * time.Minute) - accNormal := mustCreateAccount(s.T(), s.db, &accountModel{Name: "a-normal", Schedulable: true}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a-error", Status: service.StatusError, Schedulable: true}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a-rl", RateLimitedAt: &now, RateLimitResetAt: &resetAt, Schedulable: true}) - mustCreateAccount(s.T(), s.db, &accountModel{Name: "a-ov", OverloadUntil: &resetAt, Schedulable: true}) + accNormal := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-normal", Schedulable: true}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-error", Status: service.StatusError, Schedulable: true}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-rl", RateLimitedAt: &now, RateLimitResetAt: &resetAt, Schedulable: true}) + mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-ov", OverloadUntil: &resetAt, Schedulable: true}) d1, d2, d3 := 100, 200, 300 logToday := &service.UsageLog{ @@ -285,7 +289,8 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { s.Require().GreaterOrEqual(stats.TodayRequests, int64(1), "expected TodayRequests >= 1") s.Require().GreaterOrEqual(stats.TodayCost, 0.0, "expected TodayCost >= 0") - wantRpm, wantTpm := s.repo.getPerformanceStats(s.ctx, 0) + wantRpm, wantTpm, err := s.repo.getPerformanceStats(s.ctx, 0) + s.Require().NoError(err, "getPerformanceStats") s.Require().Equal(wantRpm, stats.Rpm, "Rpm mismatch") s.Require().Equal(wantTpm, stats.Tpm, "Tpm mismatch") } @@ -293,9 +298,9 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { // --- 
GetUserDashboardStats --- func (s *UsageLogRepoSuite) TestGetUserDashboardStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "userdash@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-userdash", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-userdash"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "userdash@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-userdash", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-userdash"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -308,9 +313,9 @@ func (s *UsageLogRepoSuite) TestGetUserDashboardStats() { // --- GetAccountTodayStats --- func (s *UsageLogRepoSuite) TestGetAccountTodayStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "acctoday@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-acctoday", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-today"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "acctoday@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-acctoday", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-today"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -323,11 +328,11 @@ func (s *UsageLogRepoSuite) TestGetAccountTodayStats() { // --- GetBatchUserUsageStats --- func (s *UsageLogRepoSuite) TestGetBatchUserUsageStats() { - user1 := mustCreateUser(s.T(), s.db, &userModel{Email: "batch1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "batch2@test.com"}) - apiKey1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user1.ID, Key: "sk-batch1", Name: "k"}) - apiKey2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user2.ID, Key: "sk-batch2", Name: 
"k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-batch"}) + user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "batch1@test.com"}) + user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "batch2@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user1.ID, Key: "sk-batch1", Name: "k"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user2.ID, Key: "sk-batch2", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-batch"}) s.createUsageLog(user1, apiKey1, account, 10, 20, 0.5, time.Now()) s.createUsageLog(user2, apiKey2, account, 15, 25, 0.6, time.Now()) @@ -348,10 +353,10 @@ func (s *UsageLogRepoSuite) TestGetBatchUserUsageStats_Empty() { // --- GetBatchApiKeyUsageStats --- func (s *UsageLogRepoSuite) TestGetBatchApiKeyUsageStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "batchkey@test.com"}) - apiKey1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-batchkey1", Name: "k1"}) - apiKey2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-batchkey2", Name: "k2"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-batchkey"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "batchkey@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-batchkey1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-batchkey2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-batchkey"}) s.createUsageLog(user, apiKey1, account, 10, 20, 0.5, time.Now()) s.createUsageLog(user, apiKey2, account, 15, 25, 0.6, time.Now()) @@ -370,9 +375,9 @@ func (s *UsageLogRepoSuite) TestGetBatchApiKeyUsageStats_Empty() { // --- GetGlobalStats --- func (s *UsageLogRepoSuite) TestGetGlobalStats() { - user := mustCreateUser(s.T(), s.db, 
&userModel{Email: "global@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-global", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-global"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "global@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-global", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-global"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -395,9 +400,9 @@ func maxTime(a, b time.Time) time.Time { // --- ListByUserAndTimeRange --- func (s *UsageLogRepoSuite) TestListByUserAndTimeRange() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "timerange@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-timerange", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-timerange"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "timerange@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-timerange", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-timerange"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -414,9 +419,9 @@ func (s *UsageLogRepoSuite) TestListByUserAndTimeRange() { // --- ListByApiKeyAndTimeRange --- func (s *UsageLogRepoSuite) TestListByApiKeyAndTimeRange() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "keytimerange@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-keytimerange", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-keytimerange"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytimerange@test.com"}) + apiKey := 
mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-keytimerange", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-keytimerange"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -433,9 +438,9 @@ func (s *UsageLogRepoSuite) TestListByApiKeyAndTimeRange() { // --- ListByAccountAndTimeRange --- func (s *UsageLogRepoSuite) TestListByAccountAndTimeRange() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "acctimerange@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-acctimerange", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-acctimerange"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "acctimerange@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-acctimerange", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-acctimerange"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -452,9 +457,9 @@ func (s *UsageLogRepoSuite) TestListByAccountAndTimeRange() { // --- ListByModelAndTimeRange --- func (s *UsageLogRepoSuite) TestListByModelAndTimeRange() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "modeltimerange@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-modeltimerange", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-modeltimerange"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "modeltimerange@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-modeltimerange", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modeltimerange"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) @@ -508,9 
+513,9 @@ func (s *UsageLogRepoSuite) TestListByModelAndTimeRange() { // --- GetAccountWindowStats --- func (s *UsageLogRepoSuite) TestGetAccountWindowStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "windowstats@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-windowstats", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-windowstats"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "windowstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-windowstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-windowstats"}) now := time.Now() windowStart := now.Add(-10 * time.Minute) @@ -528,9 +533,9 @@ func (s *UsageLogRepoSuite) TestGetAccountWindowStats() { // --- GetUserUsageTrendByUserID --- func (s *UsageLogRepoSuite) TestGetUserUsageTrendByUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "usertrend@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-usertrend", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-usertrend"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-usertrend", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrend"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -545,9 +550,9 @@ func (s *UsageLogRepoSuite) TestGetUserUsageTrendByUserID() { } func (s *UsageLogRepoSuite) TestGetUserUsageTrendByUserID_HourlyGranularity() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "usertrendhourly@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-usertrendhourly", Name: "k"}) - account 
:= mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-usertrendhourly"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrendhourly@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-usertrendhourly", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrendhourly"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -564,9 +569,9 @@ func (s *UsageLogRepoSuite) TestGetUserUsageTrendByUserID_HourlyGranularity() { // --- GetUserModelStats --- func (s *UsageLogRepoSuite) TestGetUserModelStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "modelstats@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-modelstats", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-modelstats"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "modelstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-modelstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modelstats"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) @@ -611,9 +616,9 @@ func (s *UsageLogRepoSuite) TestGetUserModelStats() { // --- GetUsageTrendWithFilters --- func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "trendfilters@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-trendfilters", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-trendfilters"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "trendfilters@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-trendfilters", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, 
&service.Account{Name: "acc-trendfilters"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -639,9 +644,9 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { } func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "trendfilters-h@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-trendfilters-h", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-trendfilters-h"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "trendfilters-h@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-trendfilters-h", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-trendfilters-h"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -658,9 +663,9 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { // --- GetModelStatsWithFilters --- func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "modelfilters@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-modelfilters", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-modelfilters"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "modelfilters@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-modelfilters", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modelfilters"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) @@ -712,9 +717,9 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { // --- GetAccountUsageStats --- func (s *UsageLogRepoSuite) 
TestGetAccountUsageStats() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "accstats@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-accstats", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-accstats"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "accstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-accstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-accstats"}) base := time.Date(2025, 1, 15, 0, 0, 0, 0, time.UTC) @@ -758,7 +763,7 @@ func (s *UsageLogRepoSuite) TestGetAccountUsageStats() { } func (s *UsageLogRepoSuite) TestGetAccountUsageStats_EmptyRange() { - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-emptystats"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-emptystats"}) base := time.Date(2025, 1, 15, 0, 0, 0, 0, time.UTC) startTime := base @@ -774,11 +779,11 @@ func (s *UsageLogRepoSuite) TestGetAccountUsageStats_EmptyRange() { // --- GetUserUsageTrend --- func (s *UsageLogRepoSuite) TestGetUserUsageTrend() { - user1 := mustCreateUser(s.T(), s.db, &userModel{Email: "usertrend1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "usertrend2@test.com"}) - apiKey1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user1.ID, Key: "sk-usertrend1", Name: "k1"}) - apiKey2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user2.ID, Key: "sk-usertrend2", Name: "k2"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-usertrends"}) + user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend1@test.com"}) + user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend2@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user1.ID, Key: "sk-usertrend1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, 
&service.ApiKey{UserID: user2.ID, Key: "sk-usertrend2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrends"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user1, apiKey1, account, 100, 200, 1.0, base) @@ -796,10 +801,10 @@ func (s *UsageLogRepoSuite) TestGetUserUsageTrend() { // --- GetApiKeyUsageTrend --- func (s *UsageLogRepoSuite) TestGetApiKeyUsageTrend() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "keytrend@test.com"}) - apiKey1 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-keytrend1", Name: "k1"}) - apiKey2 := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-keytrend2", Name: "k2"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-keytrends"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytrend@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-keytrend1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-keytrend2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-keytrends"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey1, account, 100, 200, 1.0, base) @@ -815,9 +820,9 @@ func (s *UsageLogRepoSuite) TestGetApiKeyUsageTrend() { } func (s *UsageLogRepoSuite) TestGetApiKeyUsageTrend_HourlyGranularity() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "keytrendh@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-keytrendh", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-keytrendh"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytrendh@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-keytrendh", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, 
&service.Account{Name: "acc-keytrendh"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 100, 200, 1.0, base) @@ -834,9 +839,9 @@ func (s *UsageLogRepoSuite) TestGetApiKeyUsageTrend_HourlyGranularity() { // --- ListWithFilters (additional filter tests) --- func (s *UsageLogRepoSuite) TestListWithFilters_ApiKeyFilter() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "filterskey@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-filterskey", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-filterskey"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterskey@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-filterskey", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterskey"}) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) @@ -848,9 +853,9 @@ func (s *UsageLogRepoSuite) TestListWithFilters_ApiKeyFilter() { } func (s *UsageLogRepoSuite) TestListWithFilters_TimeRange() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "filterstime@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-filterstime", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-filterstime"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterstime@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-filterstime", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterstime"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) @@ -867,9 +872,9 @@ func (s *UsageLogRepoSuite) TestListWithFilters_TimeRange() { } func (s *UsageLogRepoSuite) TestListWithFilters_CombinedFilters() { - user := 
mustCreateUser(s.T(), s.db, &userModel{Email: "filterscombined@test.com"}) - apiKey := mustCreateApiKey(s.T(), s.db, &apiKeyModel{UserID: user.ID, Key: "sk-filterscombined", Name: "k"}) - account := mustCreateAccount(s.T(), s.db, &accountModel{Name: "acc-filterscombined"}) + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterscombined@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.ApiKey{UserID: user.ID, Key: "sk-filterscombined", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterscombined"}) base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) diff --git a/backend/internal/repository/user_repo.go b/backend/internal/repository/user_repo.go index 37e1e173..be8cfb56 100644 --- a/backend/internal/repository/user_repo.go +++ b/backend/internal/repository/user_repo.go @@ -2,252 +2,412 @@ package repository import ( "context" - "time" - - "github.com/Wei-Shaw/sub2api/internal/service" + "database/sql" + "sort" + dbent "github.com/Wei-Shaw/sub2api/ent" + dbuser "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" - + "github.com/Wei-Shaw/sub2api/internal/service" "github.com/lib/pq" - "gorm.io/gorm" ) type userRepository struct { - db *gorm.DB + client *dbent.Client + sql sqlExecutor + begin sqlBeginner } -func NewUserRepository(db *gorm.DB) service.UserRepository { - return &userRepository{db: db} +func NewUserRepository(client *dbent.Client, sqlDB *sql.DB) service.UserRepository { + return newUserRepositoryWithSQL(client, sqlDB) } -func (r *userRepository) Create(ctx context.Context, user *service.User) error { - m := userModelFromService(user) - err := r.db.WithContext(ctx).Create(m).Error - if err == nil { - applyUserModelToService(user, m) +func newUserRepositoryWithSQL(client 
*dbent.Client, sqlq sqlExecutor) *userRepository { + var beginner sqlBeginner + if b, ok := sqlq.(sqlBeginner); ok { + beginner = b } - return translatePersistenceError(err, nil, service.ErrEmailExists) + return &userRepository{client: client, sql: sqlq, begin: beginner} +} + +func (r *userRepository) Create(ctx context.Context, userIn *service.User) error { + if userIn == nil { + return nil + } + + exec := r.sql + txClient := r.client + var sqlTx *sql.Tx + var txClientClose func() error + + if r.begin != nil { + var err error + sqlTx, err = r.begin.BeginTx(ctx, nil) + if err != nil { + return err + } + exec = sqlTx + txClient = entClientFromSQLTx(sqlTx) + txClientClose = txClient.Close + defer func() { _ = sqlTx.Rollback() }() + } + if txClientClose != nil { + defer func() { _ = txClientClose() }() + } + + created, err := txClient.User.Create(). + SetEmail(userIn.Email). + SetUsername(userIn.Username). + SetWechat(userIn.Wechat). + SetNotes(userIn.Notes). + SetPasswordHash(userIn.PasswordHash). + SetRole(userIn.Role). + SetBalance(userIn.Balance). + SetConcurrency(userIn.Concurrency). + SetStatus(userIn.Status). 
+ Save(ctx) + if err != nil { + return translatePersistenceError(err, nil, service.ErrEmailExists) + } + + if err := r.syncUserAllowedGroups(ctx, txClient, exec, created.ID, userIn.AllowedGroups); err != nil { + return err + } + + if sqlTx != nil { + if err := sqlTx.Commit(); err != nil { + return err + } + } + + applyUserEntityToService(userIn, created) + return nil } func (r *userRepository) GetByID(ctx context.Context, id int64) (*service.User, error) { - var m userModel - err := r.db.WithContext(ctx).First(&m, id).Error + m, err := r.client.User.Query().Where(dbuser.IDEQ(id)).Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) } - return userModelToService(&m), nil + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{id}) + if err == nil { + if v, ok := groups[id]; ok { + out.AllowedGroups = v + } + } + return out, nil } func (r *userRepository) GetByEmail(ctx context.Context, email string) (*service.User, error) { - var m userModel - err := r.db.WithContext(ctx).Where("email = ?", email).First(&m).Error + m, err := r.client.User.Query().Where(dbuser.EmailEQ(email)).Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) } - return userModelToService(&m), nil + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{m.ID}) + if err == nil { + if v, ok := groups[m.ID]; ok { + out.AllowedGroups = v + } + } + return out, nil } -func (r *userRepository) Update(ctx context.Context, user *service.User) error { - m := userModelFromService(user) - err := r.db.WithContext(ctx).Save(m).Error - if err == nil { - applyUserModelToService(user, m) +func (r *userRepository) Update(ctx context.Context, userIn *service.User) error { + if userIn == nil { + return nil } - return translatePersistenceError(err, nil, service.ErrEmailExists) + + exec := r.sql + txClient := r.client + var sqlTx *sql.Tx + var txClientClose func() error + + 
if r.begin != nil { + var err error + sqlTx, err = r.begin.BeginTx(ctx, nil) + if err != nil { + return err + } + exec = sqlTx + txClient = entClientFromSQLTx(sqlTx) + txClientClose = txClient.Close + defer func() { _ = sqlTx.Rollback() }() + } + if txClientClose != nil { + defer func() { _ = txClientClose() }() + } + + updated, err := txClient.User.UpdateOneID(userIn.ID). + SetEmail(userIn.Email). + SetUsername(userIn.Username). + SetWechat(userIn.Wechat). + SetNotes(userIn.Notes). + SetPasswordHash(userIn.PasswordHash). + SetRole(userIn.Role). + SetBalance(userIn.Balance). + SetConcurrency(userIn.Concurrency). + SetStatus(userIn.Status). + Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, service.ErrEmailExists) + } + + if err := r.syncUserAllowedGroups(ctx, txClient, exec, updated.ID, userIn.AllowedGroups); err != nil { + return err + } + + if sqlTx != nil { + if err := sqlTx.Commit(); err != nil { + return err + } + } + + userIn.UpdatedAt = updated.UpdatedAt + return nil } func (r *userRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&userModel{}, id).Error + _, err := r.client.User.Delete().Where(dbuser.IDEQ(id)).Exec(ctx) + return err } func (r *userRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.User, *pagination.PaginationResult, error) { return r.ListWithFilters(ctx, params, "", "", "") } -// ListWithFilters lists users with optional filtering by status, role, and search query func (r *userRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, status, role, search string) ([]service.User, *pagination.PaginationResult, error) { - var users []userModel - var total int64 + q := r.client.User.Query() - db := r.db.WithContext(ctx).Model(&userModel{}) - - // Apply filters if status != "" { - db = db.Where("status = ?", status) + q = q.Where(dbuser.StatusEQ(status)) } if role != "" { - db = db.Where("role = 
?", role) + q = q.Where(dbuser.RoleEQ(role)) } if search != "" { - searchPattern := "%" + search + "%" - db = db.Where( - "email ILIKE ? OR username ILIKE ? OR wechat ILIKE ?", - searchPattern, searchPattern, searchPattern, + q = q.Where( + dbuser.Or( + dbuser.EmailContainsFold(search), + dbuser.UsernameContainsFold(search), + dbuser.WechatContainsFold(search), + ), ) } - if err := db.Count(&total).Error; err != nil { + total, err := q.Clone().Count(ctx) + if err != nil { return nil, nil, err } - // Query users with pagination (reuse the same db with filters applied) - if err := db.Offset(params.Offset()).Limit(params.Limit()).Order("id DESC").Find(&users).Error; err != nil { + users, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(dbuser.FieldID)). + All(ctx) + if err != nil { return nil, nil, err } - // Batch load subscriptions for all users (avoid N+1) - if len(users) > 0 { - userIDs := make([]int64, len(users)) - userMap := make(map[int64]*service.User, len(users)) - outUsers := make([]service.User, 0, len(users)) - for i := range users { - userIDs[i] = users[i].ID - u := userModelToService(&users[i]) - outUsers = append(outUsers, *u) - userMap[u.ID] = &outUsers[len(outUsers)-1] - } - - // Query active subscriptions with groups in one query - var subscriptions []userSubscriptionModel - if err := r.db.WithContext(ctx). - Preload("Group"). - Where("user_id IN ? AND status = ?", userIDs, service.SubscriptionStatusActive). 
- Find(&subscriptions).Error; err != nil { - return nil, nil, err - } - - // Associate subscriptions with users - for i := range subscriptions { - if user, ok := userMap[subscriptions[i].UserID]; ok { - user.Subscriptions = append(user.Subscriptions, *userSubscriptionModelToService(&subscriptions[i])) - } - } - - return outUsers, paginationResultFromTotal(total, params), nil - } - outUsers := make([]service.User, 0, len(users)) - for i := range users { - outUsers = append(outUsers, *userModelToService(&users[i])) + if len(users) == 0 { + return outUsers, paginationResultFromTotal(int64(total), params), nil } - return outUsers, paginationResultFromTotal(total, params), nil + userIDs := make([]int64, 0, len(users)) + userMap := make(map[int64]*service.User, len(users)) + for i := range users { + userIDs = append(userIDs, users[i].ID) + u := userEntityToService(users[i]) + outUsers = append(outUsers, *u) + userMap[u.ID] = &outUsers[len(outUsers)-1] + } + + // Batch load active subscriptions with groups to avoid N+1. + subs, err := r.client.UserSubscription.Query(). + Where( + usersubscription.UserIDIn(userIDs...), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + ). + WithGroup(). + All(ctx) + if err != nil { + return nil, nil, err + } + + for i := range subs { + if u, ok := userMap[subs[i].UserID]; ok { + u.Subscriptions = append(u.Subscriptions, *userSubscriptionEntityToService(subs[i])) + } + } + + allowedGroupsByUser, err := r.loadAllowedGroups(ctx, userIDs) + if err == nil { + for id, u := range userMap { + if groups, ok := allowedGroupsByUser[id]; ok { + u.AllowedGroups = groups + } + } + } + + return outUsers, paginationResultFromTotal(int64(total), params), nil } func (r *userRepository) UpdateBalance(ctx context.Context, id int64, amount float64) error { - return r.db.WithContext(ctx).Model(&userModel{}).Where("id = ?", id). 
- Update("balance", gorm.Expr("balance + ?", amount)).Error + _, err := r.client.User.Update().Where(dbuser.IDEQ(id)).AddBalance(amount).Save(ctx) + return err } -// DeductBalance 扣减用户余额,仅当余额充足时执行 func (r *userRepository) DeductBalance(ctx context.Context, id int64, amount float64) error { - result := r.db.WithContext(ctx).Model(&userModel{}). - Where("id = ? AND balance >= ?", id, amount). - Update("balance", gorm.Expr("balance - ?", amount)) - if result.Error != nil { - return result.Error + n, err := r.client.User.Update(). + Where(dbuser.IDEQ(id), dbuser.BalanceGTE(amount)). + AddBalance(-amount). + Save(ctx) + if err != nil { + return err } - if result.RowsAffected == 0 { + if n == 0 { return service.ErrInsufficientBalance } return nil } func (r *userRepository) UpdateConcurrency(ctx context.Context, id int64, amount int) error { - return r.db.WithContext(ctx).Model(&userModel{}).Where("id = ?", id). - Update("concurrency", gorm.Expr("concurrency + ?", amount)).Error + _, err := r.client.User.Update().Where(dbuser.IDEQ(id)).AddConcurrency(amount).Save(ctx) + return err } func (r *userRepository) ExistsByEmail(ctx context.Context, email string) (bool, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&userModel{}).Where("email = ?", email).Count(&count).Error - return count > 0, err + return r.client.User.Query().Where(dbuser.EmailEQ(email)).Exist(ctx) } -// RemoveGroupFromAllowedGroups 从所有用户的 allowed_groups 数组中移除指定的分组ID -// 使用 PostgreSQL 的 array_remove 函数 func (r *userRepository) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) { - result := r.db.WithContext(ctx).Model(&userModel{}). - Where("? = ANY(allowed_groups)", groupID). - Update("allowed_groups", gorm.Expr("array_remove(allowed_groups, ?)", groupID)) - return result.RowsAffected, result.Error + if r.sql == nil { + return 0, nil + } + + joinAffected, err := r.client.UserAllowedGroup.Delete(). + Where(userallowedgroup.GroupIDEQ(groupID)). 
+ Exec(ctx) + if err != nil { + return 0, err + } + + arrayRes, err := r.sql.ExecContext( + ctx, + "UPDATE users SET allowed_groups = array_remove(allowed_groups, $1), updated_at = NOW() WHERE $1 = ANY(allowed_groups)", + groupID, + ) + if err != nil { + return 0, err + } + arrayAffected, _ := arrayRes.RowsAffected() + + if int64(joinAffected) > arrayAffected { + return int64(joinAffected), nil + } + return arrayAffected, nil } -// GetFirstAdmin 获取第一个管理员用户(用于 Admin API Key 认证) func (r *userRepository) GetFirstAdmin(ctx context.Context) (*service.User, error) { - var m userModel - err := r.db.WithContext(ctx). - Where("role = ? AND status = ?", service.RoleAdmin, service.StatusActive). - Order("id ASC"). - First(&m).Error + m, err := r.client.User.Query(). + Where( + dbuser.RoleEQ(service.RoleAdmin), + dbuser.StatusEQ(service.StatusActive), + ). + Order(dbent.Asc(dbuser.FieldID)). + First(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) } - return userModelToService(&m), nil + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{m.ID}) + if err == nil { + if v, ok := groups[m.ID]; ok { + out.AllowedGroups = v + } + } + return out, nil } -type userModel struct { - ID int64 `gorm:"primaryKey"` - Email string `gorm:"uniqueIndex;size:255;not null"` - Username string `gorm:"size:100;default:''"` - Wechat string `gorm:"size:100;default:''"` - Notes string `gorm:"type:text;default:''"` - PasswordHash string `gorm:"size:255;not null"` - Role string `gorm:"size:20;default:user;not null"` - Balance float64 `gorm:"type:decimal(20,8);default:0;not null"` - Concurrency int `gorm:"default:5;not null"` - Status string `gorm:"size:20;default:active;not null"` - AllowedGroups pq.Int64Array `gorm:"type:bigint[]"` - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - DeletedAt gorm.DeletedAt `gorm:"index"` +func (r *userRepository) loadAllowedGroups(ctx context.Context, userIDs 
[]int64) (map[int64][]int64, error) { + out := make(map[int64][]int64, len(userIDs)) + if len(userIDs) == 0 { + return out, nil + } + + rows, err := r.client.UserAllowedGroup.Query(). + Where(userallowedgroup.UserIDIn(userIDs...)). + All(ctx) + if err != nil { + return nil, err + } + + for i := range rows { + out[rows[i].UserID] = append(out[rows[i].UserID], rows[i].GroupID) + } + + for userID := range out { + sort.Slice(out[userID], func(i, j int) bool { return out[userID][i] < out[userID][j] }) + } + + return out, nil } -func (userModel) TableName() string { return "users" } - -func userModelToService(m *userModel) *service.User { - if m == nil { +func (r *userRepository) syncUserAllowedGroups(ctx context.Context, client *dbent.Client, exec sqlExecutor, userID int64, groupIDs []int64) error { + if client == nil || exec == nil { return nil } - return &service.User{ - ID: m.ID, - Email: m.Email, - Username: m.Username, - Wechat: m.Wechat, - Notes: m.Notes, - PasswordHash: m.PasswordHash, - Role: m.Role, - Balance: m.Balance, - Concurrency: m.Concurrency, - Status: m.Status, - AllowedGroups: []int64(m.AllowedGroups), - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, + + // Keep join table as the source of truth for reads. + if _, err := client.UserAllowedGroup.Delete().Where(userallowedgroup.UserIDEQ(userID)).Exec(ctx); err != nil { + return err } + + unique := make(map[int64]struct{}, len(groupIDs)) + for _, id := range groupIDs { + if id <= 0 { + continue + } + unique[id] = struct{}{} + } + + legacyGroups := make([]int64, 0, len(unique)) + if len(unique) > 0 { + creates := make([]*dbent.UserAllowedGroupCreate, 0, len(unique)) + for groupID := range unique { + creates = append(creates, client.UserAllowedGroup.Create().SetUserID(userID).SetGroupID(groupID)) + legacyGroups = append(legacyGroups, groupID) + } + if err := client.UserAllowedGroup. + CreateBulk(creates...). + OnConflictColumns(userallowedgroup.FieldUserID, userallowedgroup.FieldGroupID). + DoNothing(). 
+ Exec(ctx); err != nil { + return err + } + } + + // Phase 1 compatibility: keep legacy users.allowed_groups array updated for existing raw SQL paths. + var legacy any + if len(legacyGroups) > 0 { + sort.Slice(legacyGroups, func(i, j int) bool { return legacyGroups[i] < legacyGroups[j] }) + legacy = pq.Array(legacyGroups) + } + if _, err := exec.ExecContext(ctx, "UPDATE users SET allowed_groups = $1::bigint[] WHERE id = $2", legacy, userID); err != nil { + return err + } + + return nil } -func userModelFromService(u *service.User) *userModel { - if u == nil { - return nil - } - return &userModel{ - ID: u.ID, - Email: u.Email, - Username: u.Username, - Wechat: u.Wechat, - Notes: u.Notes, - PasswordHash: u.PasswordHash, - Role: u.Role, - Balance: u.Balance, - Concurrency: u.Concurrency, - Status: u.Status, - AllowedGroups: pq.Int64Array(u.AllowedGroups), - CreatedAt: u.CreatedAt, - UpdatedAt: u.UpdatedAt, - } -} - -func applyUserModelToService(dst *service.User, src *userModel) { +func applyUserEntityToService(dst *service.User, src *dbent.User) { if dst == nil || src == nil { return } diff --git a/backend/internal/repository/user_repo_integration_test.go b/backend/internal/repository/user_repo_integration_test.go index cd5254ee..afb1fb6a 100644 --- a/backend/internal/repository/user_repo_integration_test.go +++ b/backend/internal/repository/user_repo_integration_test.go @@ -4,46 +4,103 @@ package repository import ( "context" + "database/sql" "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" - "github.com/lib/pq" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type UserRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *userRepository + ctx context.Context + tx *sql.Tx + client *dbent.Client + repo *userRepository } func (s *UserRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = 
NewUserRepository(s.db).(*userRepository) + entClient, tx := testEntSQLTx(s.T()) + s.tx = tx + s.client = entClient + s.repo = newUserRepositoryWithSQL(entClient, tx) } func TestUserRepoSuite(t *testing.T) { suite.Run(t, new(UserRepoSuite)) } +func (s *UserRepoSuite) mustCreateUser(u *service.User) *service.User { + s.T().Helper() + + if u.Email == "" { + u.Email = "user-" + time.Now().Format(time.RFC3339Nano) + "@example.com" + } + if u.PasswordHash == "" { + u.PasswordHash = "test-password-hash" + } + if u.Role == "" { + u.Role = service.RoleUser + } + if u.Status == "" { + u.Status = service.StatusActive + } + if u.Concurrency == 0 { + u.Concurrency = 5 + } + + s.Require().NoError(s.repo.Create(s.ctx, u), "create user") + return u +} + +func (s *UserRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). + SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *UserRepoSuite) mustCreateSubscription(userID, groupID int64, mutate func(*dbent.UserSubscriptionCreate)) *dbent.UserSubscription { + s.T().Helper() + + now := time.Now() + create := s.client.UserSubscription.Create(). + SetUserID(userID). + SetGroupID(groupID). + SetStartsAt(now.Add(-1*time.Hour)). + SetExpiresAt(now.Add(24*time.Hour)). + SetStatus(service.SubscriptionStatusActive). + SetAssignedAt(now). 
+ SetNotes("") + + if mutate != nil { + mutate(create) + } + + sub, err := create.Save(s.ctx) + s.Require().NoError(err, "create subscription") + return sub +} + // --- Create / GetByID / GetByEmail / Update / Delete --- func (s *UserRepoSuite) TestCreate() { - user := &service.User{ + user := s.mustCreateUser(&service.User{ Email: "create@test.com", Username: "testuser", PasswordHash: "test-password-hash", Role: service.RoleUser, Status: service.StatusActive, - } + }) - err := s.repo.Create(s.ctx, user) - s.Require().NoError(err, "Create") s.Require().NotZero(user.ID, "expected ID to be set") got, err := s.repo.GetByID(s.ctx, user.ID) @@ -57,7 +114,7 @@ func (s *UserRepoSuite) TestGetByID_NotFound() { } func (s *UserRepoSuite) TestGetByEmail() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "byemail@test.com"}) + user := s.mustCreateUser(&service.User{Email: "byemail@test.com"}) got, err := s.repo.GetByEmail(s.ctx, user.Email) s.Require().NoError(err, "GetByEmail") @@ -70,19 +127,20 @@ func (s *UserRepoSuite) TestGetByEmail_NotFound() { } func (s *UserRepoSuite) TestUpdate() { - user := userModelToService(mustCreateUser(s.T(), s.db, &userModel{Email: "update@test.com", Username: "original"})) - - user.Username = "updated" - err := s.repo.Update(s.ctx, user) - s.Require().NoError(err, "Update") + user := s.mustCreateUser(&service.User{Email: "update@test.com", Username: "original"}) got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + got.Username = "updated" + s.Require().NoError(s.repo.Update(s.ctx, got), "Update") + + updated, err := s.repo.GetByID(s.ctx, user.ID) s.Require().NoError(err, "GetByID after update") - s.Require().Equal("updated", got.Username) + s.Require().Equal("updated", updated.Username) } func (s *UserRepoSuite) TestDelete() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "delete@test.com"}) + user := s.mustCreateUser(&service.User{Email: "delete@test.com"}) err := s.repo.Delete(s.ctx, user.ID) 
s.Require().NoError(err, "Delete") @@ -94,8 +152,8 @@ func (s *UserRepoSuite) TestDelete() { // --- List / ListWithFilters --- func (s *UserRepoSuite) TestList() { - mustCreateUser(s.T(), s.db, &userModel{Email: "list1@test.com"}) - mustCreateUser(s.T(), s.db, &userModel{Email: "list2@test.com"}) + s.mustCreateUser(&service.User{Email: "list1@test.com"}) + s.mustCreateUser(&service.User{Email: "list2@test.com"}) users, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "List") @@ -104,8 +162,8 @@ func (s *UserRepoSuite) TestList() { } func (s *UserRepoSuite) TestListWithFilters_Status() { - mustCreateUser(s.T(), s.db, &userModel{Email: "active@test.com", Status: service.StatusActive}) - mustCreateUser(s.T(), s.db, &userModel{Email: "disabled@test.com", Status: service.StatusDisabled}) + s.mustCreateUser(&service.User{Email: "active@test.com", Status: service.StatusActive}) + s.mustCreateUser(&service.User{Email: "disabled@test.com", Status: service.StatusDisabled}) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.StatusActive, "", "") s.Require().NoError(err) @@ -114,8 +172,8 @@ func (s *UserRepoSuite) TestListWithFilters_Status() { } func (s *UserRepoSuite) TestListWithFilters_Role() { - mustCreateUser(s.T(), s.db, &userModel{Email: "user@test.com", Role: service.RoleUser}) - mustCreateUser(s.T(), s.db, &userModel{Email: "admin@test.com", Role: service.RoleAdmin}) + s.mustCreateUser(&service.User{Email: "user@test.com", Role: service.RoleUser}) + s.mustCreateUser(&service.User{Email: "admin@test.com", Role: service.RoleAdmin}) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.RoleAdmin, "") s.Require().NoError(err) @@ -124,8 +182,8 @@ func (s *UserRepoSuite) TestListWithFilters_Role() { } func (s *UserRepoSuite) TestListWithFilters_Search() { - mustCreateUser(s.T(), s.db, &userModel{Email: 
"alice@test.com", Username: "Alice"}) - mustCreateUser(s.T(), s.db, &userModel{Email: "bob@test.com", Username: "Bob"}) + s.mustCreateUser(&service.User{Email: "alice@test.com", Username: "Alice"}) + s.mustCreateUser(&service.User{Email: "bob@test.com", Username: "Bob"}) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "alice") s.Require().NoError(err) @@ -134,8 +192,8 @@ func (s *UserRepoSuite) TestListWithFilters_Search() { } func (s *UserRepoSuite) TestListWithFilters_SearchByUsername() { - mustCreateUser(s.T(), s.db, &userModel{Email: "u1@test.com", Username: "JohnDoe"}) - mustCreateUser(s.T(), s.db, &userModel{Email: "u2@test.com", Username: "JaneSmith"}) + s.mustCreateUser(&service.User{Email: "u1@test.com", Username: "JohnDoe"}) + s.mustCreateUser(&service.User{Email: "u2@test.com", Username: "JaneSmith"}) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "john") s.Require().NoError(err) @@ -144,8 +202,8 @@ func (s *UserRepoSuite) TestListWithFilters_SearchByUsername() { } func (s *UserRepoSuite) TestListWithFilters_SearchByWechat() { - mustCreateUser(s.T(), s.db, &userModel{Email: "w1@test.com", Wechat: "wx_hello"}) - mustCreateUser(s.T(), s.db, &userModel{Email: "w2@test.com", Wechat: "wx_world"}) + s.mustCreateUser(&service.User{Email: "w1@test.com", Wechat: "wx_hello"}) + s.mustCreateUser(&service.User{Email: "w2@test.com", Wechat: "wx_world"}) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "wx_hello") s.Require().NoError(err) @@ -154,20 +212,17 @@ func (s *UserRepoSuite) TestListWithFilters_SearchByWechat() { } func (s *UserRepoSuite) TestListWithFilters_LoadsActiveSubscriptions() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "sub@test.com", Status: service.StatusActive}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-sub"}) + user := 
s.mustCreateUser(&service.User{Email: "sub@test.com", Status: service.StatusActive}) + groupActive := s.mustCreateGroup("g-sub-active") + groupExpired := s.mustCreateGroup("g-sub-expired") - _ = mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(1 * time.Hour), + _ = s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusActive) + c.SetExpiresAt(time.Now().Add(1 * time.Hour)) }) - _ = mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-1 * time.Hour), + _ = s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-1 * time.Hour)) }) users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "sub@") @@ -175,11 +230,11 @@ func (s *UserRepoSuite) TestListWithFilters_LoadsActiveSubscriptions() { s.Require().Len(users, 1, "expected 1 user") s.Require().Len(users[0].Subscriptions, 1, "expected 1 active subscription") s.Require().NotNil(users[0].Subscriptions[0].Group, "expected subscription group preload") - s.Require().Equal(group.ID, users[0].Subscriptions[0].Group.ID, "group ID mismatch") + s.Require().Equal(groupActive.ID, users[0].Subscriptions[0].Group.ID, "group ID mismatch") } func (s *UserRepoSuite) TestListWithFilters_CombinedFilters() { - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "a@example.com", Username: "Alice", Wechat: "wx_a", @@ -187,7 +242,7 @@ func (s *UserRepoSuite) TestListWithFilters_CombinedFilters() { Status: service.StatusActive, Balance: 10, }) - target := mustCreateUser(s.T(), s.db, &userModel{ + target := 
s.mustCreateUser(&service.User{ Email: "b@example.com", Username: "Bob", Wechat: "wx_b", @@ -195,7 +250,7 @@ func (s *UserRepoSuite) TestListWithFilters_CombinedFilters() { Status: service.StatusActive, Balance: 1, }) - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "c@example.com", Role: service.RoleAdmin, Status: service.StatusDisabled, @@ -211,40 +266,40 @@ func (s *UserRepoSuite) TestListWithFilters_CombinedFilters() { // --- Balance operations --- func (s *UserRepoSuite) TestUpdateBalance() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "bal@test.com", Balance: 10}) + user := s.mustCreateUser(&service.User{Email: "bal@test.com", Balance: 10}) err := s.repo.UpdateBalance(s.ctx, user.ID, 2.5) s.Require().NoError(err, "UpdateBalance") got, err := s.repo.GetByID(s.ctx, user.ID) s.Require().NoError(err) - s.Require().Equal(12.5, got.Balance) + s.Require().InDelta(12.5, got.Balance, 1e-6) } func (s *UserRepoSuite) TestUpdateBalance_Negative() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "balneg@test.com", Balance: 10}) + user := s.mustCreateUser(&service.User{Email: "balneg@test.com", Balance: 10}) err := s.repo.UpdateBalance(s.ctx, user.ID, -3) s.Require().NoError(err, "UpdateBalance with negative") got, err := s.repo.GetByID(s.ctx, user.ID) s.Require().NoError(err) - s.Require().Equal(7.0, got.Balance) + s.Require().InDelta(7.0, got.Balance, 1e-6) } func (s *UserRepoSuite) TestDeductBalance() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "deduct@test.com", Balance: 10}) + user := s.mustCreateUser(&service.User{Email: "deduct@test.com", Balance: 10}) err := s.repo.DeductBalance(s.ctx, user.ID, 5) s.Require().NoError(err, "DeductBalance") got, err := s.repo.GetByID(s.ctx, user.ID) s.Require().NoError(err) - s.Require().Equal(5.0, got.Balance) + s.Require().InDelta(5.0, got.Balance, 1e-6) } func (s *UserRepoSuite) TestDeductBalance_InsufficientFunds() { - user := mustCreateUser(s.T(), s.db, 
&userModel{Email: "insuf@test.com", Balance: 5}) + user := s.mustCreateUser(&service.User{Email: "insuf@test.com", Balance: 5}) err := s.repo.DeductBalance(s.ctx, user.ID, 999) s.Require().Error(err, "expected error for insufficient balance") @@ -252,20 +307,20 @@ func (s *UserRepoSuite) TestDeductBalance_InsufficientFunds() { } func (s *UserRepoSuite) TestDeductBalance_ExactAmount() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "exact@test.com", Balance: 10}) + user := s.mustCreateUser(&service.User{Email: "exact@test.com", Balance: 10}) err := s.repo.DeductBalance(s.ctx, user.ID, 10) s.Require().NoError(err, "DeductBalance exact amount") got, err := s.repo.GetByID(s.ctx, user.ID) s.Require().NoError(err) - s.Require().Zero(got.Balance) + s.Require().InDelta(0.0, got.Balance, 1e-6) } // --- Concurrency --- func (s *UserRepoSuite) TestUpdateConcurrency() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "conc@test.com", Concurrency: 5}) + user := s.mustCreateUser(&service.User{Email: "conc@test.com", Concurrency: 5}) err := s.repo.UpdateConcurrency(s.ctx, user.ID, 3) s.Require().NoError(err, "UpdateConcurrency") @@ -276,7 +331,7 @@ func (s *UserRepoSuite) TestUpdateConcurrency() { } func (s *UserRepoSuite) TestUpdateConcurrency_Negative() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "concneg@test.com", Concurrency: 5}) + user := s.mustCreateUser(&service.User{Email: "concneg@test.com", Concurrency: 5}) err := s.repo.UpdateConcurrency(s.ctx, user.ID, -2) s.Require().NoError(err, "UpdateConcurrency negative") @@ -289,7 +344,7 @@ func (s *UserRepoSuite) TestUpdateConcurrency_Negative() { // --- ExistsByEmail --- func (s *UserRepoSuite) TestExistsByEmail() { - mustCreateUser(s.T(), s.db, &userModel{Email: "exists@test.com"}) + s.mustCreateUser(&service.User{Email: "exists@test.com"}) exists, err := s.repo.ExistsByEmail(s.ctx, "exists@test.com") s.Require().NoError(err, "ExistsByEmail") @@ -303,34 +358,38 @@ func (s *UserRepoSuite) 
TestExistsByEmail() { // --- RemoveGroupFromAllowedGroups --- func (s *UserRepoSuite) TestRemoveGroupFromAllowedGroups() { - groupID := int64(42) - userA := mustCreateUser(s.T(), s.db, &userModel{ + target := s.mustCreateGroup("target-42") + other := s.mustCreateGroup("other-7") + + userA := s.mustCreateUser(&service.User{ Email: "a1@example.com", - AllowedGroups: pq.Int64Array{groupID, 7}, + AllowedGroups: []int64{target.ID, other.ID}, }) - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "a2@example.com", - AllowedGroups: pq.Int64Array{7}, + AllowedGroups: []int64{other.ID}, }) - affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, groupID) + affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, target.ID) s.Require().NoError(err, "RemoveGroupFromAllowedGroups") s.Require().Equal(int64(1), affected, "expected 1 affected row") got, err := s.repo.GetByID(s.ctx, userA.ID) s.Require().NoError(err, "GetByID") - for _, id := range got.AllowedGroups { - s.Require().NotEqual(groupID, id, "expected groupID to be removed from allowed_groups") - } + s.Require().NotContains(got.AllowedGroups, target.ID) + s.Require().Contains(got.AllowedGroups, other.ID) } func (s *UserRepoSuite) TestRemoveGroupFromAllowedGroups_NoMatch() { - mustCreateUser(s.T(), s.db, &userModel{ + groupA := s.mustCreateGroup("nomatch-a") + groupB := s.mustCreateGroup("nomatch-b") + + s.mustCreateUser(&service.User{ Email: "nomatch@test.com", - AllowedGroups: pq.Int64Array{1, 2, 3}, + AllowedGroups: []int64{groupA.ID, groupB.ID}, }) - affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, 999) + affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, 999999) s.Require().NoError(err) s.Require().Zero(affected, "expected no affected rows") } @@ -338,12 +397,12 @@ func (s *UserRepoSuite) TestRemoveGroupFromAllowedGroups_NoMatch() { // --- GetFirstAdmin --- func (s *UserRepoSuite) TestGetFirstAdmin() { - admin1 := mustCreateUser(s.T(), s.db, &userModel{ + 
admin1 := s.mustCreateUser(&service.User{ Email: "admin1@example.com", Role: service.RoleAdmin, Status: service.StatusActive, }) - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "admin2@example.com", Role: service.RoleAdmin, Status: service.StatusActive, @@ -355,7 +414,7 @@ func (s *UserRepoSuite) TestGetFirstAdmin() { } func (s *UserRepoSuite) TestGetFirstAdmin_NoAdmin() { - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "user@example.com", Role: service.RoleUser, Status: service.StatusActive, @@ -366,12 +425,12 @@ func (s *UserRepoSuite) TestGetFirstAdmin_NoAdmin() { } func (s *UserRepoSuite) TestGetFirstAdmin_DisabledAdminIgnored() { - mustCreateUser(s.T(), s.db, &userModel{ + s.mustCreateUser(&service.User{ Email: "disabled@example.com", Role: service.RoleAdmin, Status: service.StatusDisabled, }) - activeAdmin := mustCreateUser(s.T(), s.db, &userModel{ + activeAdmin := s.mustCreateUser(&service.User{ Email: "active@example.com", Role: service.RoleAdmin, Status: service.StatusActive, @@ -382,10 +441,10 @@ func (s *UserRepoSuite) TestGetFirstAdmin_DisabledAdminIgnored() { s.Require().Equal(activeAdmin.ID, got.ID, "should return only active admin") } -// --- Combined original test --- +// --- Combined --- func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { - user1 := mustCreateUser(s.T(), s.db, &userModel{ + user1 := s.mustCreateUser(&service.User{ Email: "a@example.com", Username: "Alice", Wechat: "wx_a", @@ -393,7 +452,7 @@ func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { Status: service.StatusActive, Balance: 10, }) - user2 := mustCreateUser(s.T(), s.db, &userModel{ + user2 := s.mustCreateUser(&service.User{ Email: "b@example.com", Username: "Bob", Wechat: "wx_b", @@ -401,7 +460,7 @@ func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { Status: service.StatusActive, Balance: 1, }) - _ = mustCreateUser(s.T(), s.db, &userModel{ + 
s.mustCreateUser(&service.User{ Email: "c@example.com", Role: service.RoleAdmin, Status: service.StatusDisabled, @@ -424,12 +483,12 @@ func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { s.Require().NoError(s.repo.UpdateBalance(s.ctx, user1.ID, 2.5), "UpdateBalance") got3, err := s.repo.GetByID(s.ctx, user1.ID) s.Require().NoError(err, "GetByID after UpdateBalance") - s.Require().Equal(12.5, got3.Balance, "UpdateBalance mismatch") + s.Require().InDelta(12.5, got3.Balance, 1e-6) s.Require().NoError(s.repo.DeductBalance(s.ctx, user1.ID, 5), "DeductBalance") got4, err := s.repo.GetByID(s.ctx, user1.ID) s.Require().NoError(err, "GetByID after DeductBalance") - s.Require().Equal(7.5, got4.Balance, "DeductBalance mismatch") + s.Require().InDelta(7.5, got4.Balance, 1e-6) err = s.repo.DeductBalance(s.ctx, user1.ID, 999) s.Require().Error(err, "DeductBalance expected error for insufficient balance") @@ -438,7 +497,7 @@ func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { s.Require().NoError(s.repo.UpdateConcurrency(s.ctx, user1.ID, 3), "UpdateConcurrency") got5, err := s.repo.GetByID(s.ctx, user1.ID) s.Require().NoError(err, "GetByID after UpdateConcurrency") - s.Require().Equal(user1.Concurrency+3, got5.Concurrency, "UpdateConcurrency mismatch") + s.Require().Equal(user1.Concurrency+3, got5.Concurrency) params := pagination.PaginationParams{Page: 1, PageSize: 10} users, page, err := s.repo.ListWithFilters(s.ctx, params, service.StatusActive, service.RoleAdmin, "b@") @@ -447,3 +506,4 @@ func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { s.Require().Len(users, 1, "ListWithFilters len mismatch") s.Require().Equal(user2.ID, users[0].ID, "ListWithFilters result mismatch") } + diff --git a/backend/internal/repository/user_subscription_repo.go b/backend/internal/repository/user_subscription_repo.go index 4c7768a8..918ccab4 100644 --- a/backend/internal/repository/user_subscription_repo.go +++ 
b/backend/internal/repository/user_subscription_repo.go @@ -4,333 +4,336 @@ import ( "context" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" - - "gorm.io/gorm" ) type userSubscriptionRepository struct { - db *gorm.DB + client *dbent.Client } -func NewUserSubscriptionRepository(db *gorm.DB) service.UserSubscriptionRepository { - return &userSubscriptionRepository{db: db} +func NewUserSubscriptionRepository(client *dbent.Client) service.UserSubscriptionRepository { + return &userSubscriptionRepository{client: client} } func (r *userSubscriptionRepository) Create(ctx context.Context, sub *service.UserSubscription) error { - m := userSubscriptionModelFromService(sub) - err := r.db.WithContext(ctx).Create(m).Error + if sub == nil { + return nil + } + + builder := r.client.UserSubscription.Create(). + SetUserID(sub.UserID). + SetGroupID(sub.GroupID). + SetExpiresAt(sub.ExpiresAt). + SetNillableDailyWindowStart(sub.DailyWindowStart). + SetNillableWeeklyWindowStart(sub.WeeklyWindowStart). + SetNillableMonthlyWindowStart(sub.MonthlyWindowStart). + SetDailyUsageUsd(sub.DailyUsageUSD). + SetWeeklyUsageUsd(sub.WeeklyUsageUSD). + SetMonthlyUsageUsd(sub.MonthlyUsageUSD). + SetNillableAssignedBy(sub.AssignedBy) + + if sub.StartsAt.IsZero() { + builder.SetStartsAt(time.Now()) + } else { + builder.SetStartsAt(sub.StartsAt) + } + if sub.Status != "" { + builder.SetStatus(sub.Status) + } + if !sub.AssignedAt.IsZero() { + builder.SetAssignedAt(sub.AssignedAt) + } + // Keep compatibility with historical behavior: always store notes as a string value. 
+ builder.SetNotes(sub.Notes) + + created, err := builder.Save(ctx) if err == nil { - applyUserSubscriptionModelToService(sub, m) + applyUserSubscriptionEntityToService(sub, created) } return translatePersistenceError(err, nil, service.ErrSubscriptionAlreadyExists) } func (r *userSubscriptionRepository) GetByID(ctx context.Context, id int64) (*service.UserSubscription, error) { - var m userSubscriptionModel - err := r.db.WithContext(ctx). - Preload("User"). - Preload("Group"). - Preload("AssignedByUser"). - First(&m, id).Error + m, err := r.client.UserSubscription.Query(). + Where(usersubscription.IDEQ(id)). + WithUser(). + WithGroup(). + WithAssignedByUser(). + Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } - return userSubscriptionModelToService(&m), nil + return userSubscriptionEntityToService(m), nil } func (r *userSubscriptionRepository) GetByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { - var m userSubscriptionModel - err := r.db.WithContext(ctx). - Preload("Group"). - Where("user_id = ? AND group_id = ?", userID, groupID). - First(&m).Error + m, err := r.client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID), usersubscription.GroupIDEQ(groupID)). + WithGroup(). + Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } - return userSubscriptionModelToService(&m), nil + return userSubscriptionEntityToService(m), nil } func (r *userSubscriptionRepository) GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { - var m userSubscriptionModel - err := r.db.WithContext(ctx). - Preload("Group"). - Where("user_id = ? AND group_id = ? AND status = ? AND expires_at > ?", - userID, groupID, service.SubscriptionStatusActive, time.Now()). - First(&m).Error + m, err := r.client.UserSubscription.Query(). 
+ Where( + usersubscription.UserIDEQ(userID), + usersubscription.GroupIDEQ(groupID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). + WithGroup(). + Only(ctx) if err != nil { return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } - return userSubscriptionModelToService(&m), nil + return userSubscriptionEntityToService(m), nil } func (r *userSubscriptionRepository) Update(ctx context.Context, sub *service.UserSubscription) error { - sub.UpdatedAt = time.Now() - m := userSubscriptionModelFromService(sub) - err := r.db.WithContext(ctx).Save(m).Error - if err == nil { - applyUserSubscriptionModelToService(sub, m) + if sub == nil { + return nil } - return err + + builder := r.client.UserSubscription.UpdateOneID(sub.ID). + SetUserID(sub.UserID). + SetGroupID(sub.GroupID). + SetStartsAt(sub.StartsAt). + SetExpiresAt(sub.ExpiresAt). + SetStatus(sub.Status). + SetNillableDailyWindowStart(sub.DailyWindowStart). + SetNillableWeeklyWindowStart(sub.WeeklyWindowStart). + SetNillableMonthlyWindowStart(sub.MonthlyWindowStart). + SetDailyUsageUsd(sub.DailyUsageUSD). + SetWeeklyUsageUsd(sub.WeeklyUsageUSD). + SetMonthlyUsageUsd(sub.MonthlyUsageUSD). + SetNillableAssignedBy(sub.AssignedBy). + SetAssignedAt(sub.AssignedAt). + SetNotes(sub.Notes) + + updated, err := builder.Save(ctx) + if err == nil { + applyUserSubscriptionEntityToService(sub, updated) + return nil + } + return translatePersistenceError(err, service.ErrSubscriptionNotFound, service.ErrSubscriptionAlreadyExists) } func (r *userSubscriptionRepository) Delete(ctx context.Context, id int64) error { - return r.db.WithContext(ctx).Delete(&userSubscriptionModel{}, id).Error + // Match GORM semantics: deleting a missing row is not an error. 
+ _, err := r.client.UserSubscription.Delete().Where(usersubscription.IDEQ(id)).Exec(ctx) + return err } func (r *userSubscriptionRepository) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { - var subs []userSubscriptionModel - err := r.db.WithContext(ctx). - Preload("Group"). - Where("user_id = ?", userID). - Order("created_at DESC"). - Find(&subs).Error + subs, err := r.client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID)). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + All(ctx) if err != nil { return nil, err } - return userSubscriptionModelsToService(subs), nil + return userSubscriptionEntitiesToService(subs), nil } func (r *userSubscriptionRepository) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { - var subs []userSubscriptionModel - err := r.db.WithContext(ctx). - Preload("Group"). - Where("user_id = ? AND status = ? AND expires_at > ?", - userID, service.SubscriptionStatusActive, time.Now()). - Order("created_at DESC"). - Find(&subs).Error + subs, err := r.client.UserSubscription.Query(). + Where( + usersubscription.UserIDEQ(userID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). 
+ All(ctx) if err != nil { return nil, err } - return userSubscriptionModelsToService(subs), nil + return userSubscriptionEntitiesToService(subs), nil } func (r *userSubscriptionRepository) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { - var subs []userSubscriptionModel - var total int64 + q := r.client.UserSubscription.Query().Where(usersubscription.GroupIDEQ(groupID)) - query := r.db.WithContext(ctx).Model(&userSubscriptionModel{}).Where("group_id = ?", groupID) - if err := query.Count(&total).Error; err != nil { - return nil, nil, err - } - - err := query. - Preload("User"). - Preload("Group"). - Order("created_at DESC"). - Offset(params.Offset()). - Limit(params.Limit()). - Find(&subs).Error + total, err := q.Clone().Count(ctx) if err != nil { return nil, nil, err } - return userSubscriptionModelsToService(subs), paginationResultFromTotal(total, params), nil + subs, err := q. + WithUser(). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + Offset(params.Offset()). + Limit(params.Limit()). 
+ All(ctx) + if err != nil { + return nil, nil, err + } + + return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil } func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) { - var subs []userSubscriptionModel - var total int64 - - query := r.db.WithContext(ctx).Model(&userSubscriptionModel{}) + q := r.client.UserSubscription.Query() if userID != nil { - query = query.Where("user_id = ?", *userID) + q = q.Where(usersubscription.UserIDEQ(*userID)) } if groupID != nil { - query = query.Where("group_id = ?", *groupID) + q = q.Where(usersubscription.GroupIDEQ(*groupID)) } if status != "" { - query = query.Where("status = ?", status) + q = q.Where(usersubscription.StatusEQ(status)) } - if err := query.Count(&total).Error; err != nil { - return nil, nil, err - } - - err := query. - Preload("User"). - Preload("Group"). - Preload("AssignedByUser"). - Order("created_at DESC"). - Offset(params.Offset()). - Limit(params.Limit()). - Find(&subs).Error + total, err := q.Clone().Count(ctx) if err != nil { return nil, nil, err } - return userSubscriptionModelsToService(subs), paginationResultFromTotal(total, params), nil + subs, err := q. + WithUser(). + WithGroup(). + WithAssignedByUser(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + Offset(params.Offset()). + Limit(params.Limit()). + All(ctx) + if err != nil { + return nil, nil, err + } + + return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil } func (r *userSubscriptionRepository) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("user_id = ? AND group_id = ?", userID, groupID). 
- Count(&count).Error - return count > 0, err + return r.client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID), usersubscription.GroupIDEQ(groupID)). + Exist(ctx) } func (r *userSubscriptionRepository) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", subscriptionID). - Updates(map[string]any{ - "expires_at": newExpiresAt, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(subscriptionID). + SetExpiresAt(newExpiresAt). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) UpdateStatus(ctx context.Context, subscriptionID int64, status string) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", subscriptionID). - Updates(map[string]any{ - "status": status, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(subscriptionID). + SetStatus(status). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", subscriptionID). - Updates(map[string]any{ - "notes": notes, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(subscriptionID). + SetNotes(notes). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) ActivateWindows(ctx context.Context, id int64, start time.Time) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", id). 
- Updates(map[string]any{ - "daily_window_start": start, - "weekly_window_start": start, - "monthly_window_start": start, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(id). + SetDailyWindowStart(start). + SetWeeklyWindowStart(start). + SetMonthlyWindowStart(start). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) ResetDailyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", id). - Updates(map[string]any{ - "daily_usage_usd": 0, - "daily_window_start": newWindowStart, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(id). + SetDailyUsageUsd(0). + SetDailyWindowStart(newWindowStart). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) ResetWeeklyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", id). - Updates(map[string]any{ - "weekly_usage_usd": 0, - "weekly_window_start": newWindowStart, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(id). + SetWeeklyUsageUsd(0). + SetWeeklyWindowStart(newWindowStart). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) ResetMonthlyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", id). - Updates(map[string]any{ - "monthly_usage_usd": 0, - "monthly_window_start": newWindowStart, - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(id). + SetMonthlyUsageUsd(0). + SetMonthlyWindowStart(newWindowStart). 
+ Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) IncrementUsage(ctx context.Context, id int64, costUSD float64) error { - return r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("id = ?", id). - Updates(map[string]any{ - "daily_usage_usd": gorm.Expr("daily_usage_usd + ?", costUSD), - "weekly_usage_usd": gorm.Expr("weekly_usage_usd + ?", costUSD), - "monthly_usage_usd": gorm.Expr("monthly_usage_usd + ?", costUSD), - "updated_at": time.Now(), - }).Error + _, err := r.client.UserSubscription.UpdateOneID(id). + AddDailyUsageUsd(costUSD). + AddWeeklyUsageUsd(costUSD). + AddMonthlyUsageUsd(costUSD). + Save(ctx) + return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) } func (r *userSubscriptionRepository) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) { - result := r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("status = ? AND expires_at <= ?", service.SubscriptionStatusActive, time.Now()). - Updates(map[string]any{ - "status": service.SubscriptionStatusExpired, - "updated_at": time.Now(), - }) - return result.RowsAffected, result.Error + n, err := r.client.UserSubscription.Update(). + Where( + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtLTE(time.Now()), + ). + SetStatus(service.SubscriptionStatusExpired). + Save(ctx) + return int64(n), err } // Extra repository helpers (currently used only by integration tests). func (r *userSubscriptionRepository) ListExpired(ctx context.Context) ([]service.UserSubscription, error) { - var subs []userSubscriptionModel - err := r.db.WithContext(ctx). - Where("status = ? AND expires_at <= ?", service.SubscriptionStatusActive, time.Now()). - Find(&subs).Error + subs, err := r.client.UserSubscription.Query(). + Where( + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtLTE(time.Now()), + ). 
+ All(ctx) if err != nil { return nil, err } - return userSubscriptionModelsToService(subs), nil + return userSubscriptionEntitiesToService(subs), nil } func (r *userSubscriptionRepository) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("group_id = ?", groupID). - Count(&count).Error - return count, err + count, err := r.client.UserSubscription.Query().Where(usersubscription.GroupIDEQ(groupID)).Count(ctx) + return int64(count), err } func (r *userSubscriptionRepository) CountActiveByGroupID(ctx context.Context, groupID int64) (int64, error) { - var count int64 - err := r.db.WithContext(ctx).Model(&userSubscriptionModel{}). - Where("group_id = ? AND status = ? AND expires_at > ?", - groupID, service.SubscriptionStatusActive, time.Now()). - Count(&count).Error - return count, err + count, err := r.client.UserSubscription.Query(). + Where( + usersubscription.GroupIDEQ(groupID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). 
+ Count(ctx) + return int64(count), err } func (r *userSubscriptionRepository) DeleteByGroupID(ctx context.Context, groupID int64) (int64, error) { - result := r.db.WithContext(ctx).Where("group_id = ?", groupID).Delete(&userSubscriptionModel{}) - return result.RowsAffected, result.Error + n, err := r.client.UserSubscription.Delete().Where(usersubscription.GroupIDEQ(groupID)).Exec(ctx) + return int64(n), err } -type userSubscriptionModel struct { - ID int64 `gorm:"primaryKey"` - UserID int64 `gorm:"index;not null"` - GroupID int64 `gorm:"index;not null"` - - StartsAt time.Time `gorm:"not null"` - ExpiresAt time.Time `gorm:"not null"` - Status string `gorm:"size:20;default:active;not null"` - - DailyWindowStart *time.Time - WeeklyWindowStart *time.Time - MonthlyWindowStart *time.Time - - DailyUsageUSD float64 `gorm:"type:decimal(20,10);default:0;not null"` - WeeklyUsageUSD float64 `gorm:"type:decimal(20,10);default:0;not null"` - MonthlyUsageUSD float64 `gorm:"type:decimal(20,10);default:0;not null"` - - AssignedBy *int64 `gorm:"index"` - AssignedAt time.Time `gorm:"not null"` - Notes string `gorm:"type:text"` - - CreatedAt time.Time `gorm:"not null"` - UpdatedAt time.Time `gorm:"not null"` - - User *userModel `gorm:"foreignKey:UserID"` - Group *groupModel `gorm:"foreignKey:GroupID"` - AssignedByUser *userModel `gorm:"foreignKey:AssignedBy"` -} - -func (userSubscriptionModel) TableName() string { return "user_subscriptions" } - -func userSubscriptionModelToService(m *userSubscriptionModel) *service.UserSubscription { +func userSubscriptionEntityToService(m *dbent.UserSubscription) *service.UserSubscription { if m == nil { return nil } - return &service.UserSubscription{ + out := &service.UserSubscription{ ID: m.ID, UserID: m.UserID, GroupID: m.GroupID, @@ -340,60 +343,42 @@ func userSubscriptionModelToService(m *userSubscriptionModel) *service.UserSubsc DailyWindowStart: m.DailyWindowStart, WeeklyWindowStart: m.WeeklyWindowStart, MonthlyWindowStart: 
m.MonthlyWindowStart, - DailyUsageUSD: m.DailyUsageUSD, - WeeklyUsageUSD: m.WeeklyUsageUSD, - MonthlyUsageUSD: m.MonthlyUsageUSD, + DailyUsageUSD: m.DailyUsageUsd, + WeeklyUsageUSD: m.WeeklyUsageUsd, + MonthlyUsageUSD: m.MonthlyUsageUsd, AssignedBy: m.AssignedBy, AssignedAt: m.AssignedAt, - Notes: m.Notes, + Notes: derefString(m.Notes), CreatedAt: m.CreatedAt, UpdatedAt: m.UpdatedAt, - User: userModelToService(m.User), - Group: groupModelToService(m.Group), - AssignedByUser: userModelToService(m.AssignedByUser), } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + if m.Edges.AssignedByUser != nil { + out.AssignedByUser = userEntityToService(m.Edges.AssignedByUser) + } + return out } -func userSubscriptionModelsToService(models []userSubscriptionModel) []service.UserSubscription { +func userSubscriptionEntitiesToService(models []*dbent.UserSubscription) []service.UserSubscription { out := make([]service.UserSubscription, 0, len(models)) for i := range models { - if s := userSubscriptionModelToService(&models[i]); s != nil { + if s := userSubscriptionEntityToService(models[i]); s != nil { out = append(out, *s) } } return out } -func userSubscriptionModelFromService(s *service.UserSubscription) *userSubscriptionModel { - if s == nil { - return nil - } - return &userSubscriptionModel{ - ID: s.ID, - UserID: s.UserID, - GroupID: s.GroupID, - StartsAt: s.StartsAt, - ExpiresAt: s.ExpiresAt, - Status: s.Status, - DailyWindowStart: s.DailyWindowStart, - WeeklyWindowStart: s.WeeklyWindowStart, - MonthlyWindowStart: s.MonthlyWindowStart, - DailyUsageUSD: s.DailyUsageUSD, - WeeklyUsageUSD: s.WeeklyUsageUSD, - MonthlyUsageUSD: s.MonthlyUsageUSD, - AssignedBy: s.AssignedBy, - AssignedAt: s.AssignedAt, - Notes: s.Notes, - CreatedAt: s.CreatedAt, - UpdatedAt: s.UpdatedAt, - } -} - -func applyUserSubscriptionModelToService(sub *service.UserSubscription, m 
*userSubscriptionModel) { - if sub == nil || m == nil { +func applyUserSubscriptionEntityToService(dst *service.UserSubscription, src *dbent.UserSubscription) { + if dst == nil || src == nil { return } - sub.ID = m.ID - sub.CreatedAt = m.CreatedAt - sub.UpdatedAt = m.UpdatedAt + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt } diff --git a/backend/internal/repository/user_subscription_repo_integration_test.go b/backend/internal/repository/user_subscription_repo_integration_test.go index f990b802..e9859012 100644 --- a/backend/internal/repository/user_subscription_repo_integration_test.go +++ b/backend/internal/repository/user_subscription_repo_integration_test.go @@ -7,34 +7,85 @@ import ( "testing" "time" + dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/stretchr/testify/suite" - "gorm.io/gorm" ) type UserSubscriptionRepoSuite struct { suite.Suite - ctx context.Context - db *gorm.DB - repo *userSubscriptionRepository + ctx context.Context + client *dbent.Client + repo *userSubscriptionRepository } func (s *UserSubscriptionRepoSuite) SetupTest() { s.ctx = context.Background() - s.db = testTx(s.T()) - s.repo = NewUserSubscriptionRepository(s.db).(*userSubscriptionRepository) + client, _ := testEntSQLTx(s.T()) + s.client = client + s.repo = NewUserSubscriptionRepository(s.client).(*userSubscriptionRepository) } func TestUserSubscriptionRepoSuite(t *testing.T) { suite.Run(t, new(UserSubscriptionRepoSuite)) } +func (s *UserSubscriptionRepoSuite) mustCreateUser(email string, role string) *service.User { + s.T().Helper() + + if role == "" { + role = service.RoleUser + } + + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + SetStatus(service.StatusActive). + SetRole(role). 
+ Save(s.ctx) + s.Require().NoError(err, "create user") + return userEntityToService(u) +} + +func (s *UserSubscriptionRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). + SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *UserSubscriptionRepoSuite) mustCreateSubscription(userID, groupID int64, mutate func(*dbent.UserSubscriptionCreate)) *dbent.UserSubscription { + s.T().Helper() + + now := time.Now() + create := s.client.UserSubscription.Create(). + SetUserID(userID). + SetGroupID(groupID). + SetStartsAt(now.Add(-1*time.Hour)). + SetExpiresAt(now.Add(24*time.Hour)). + SetStatus(service.SubscriptionStatusActive). + SetAssignedAt(now). + SetNotes("") + + if mutate != nil { + mutate(create) + } + + sub, err := create.Save(s.ctx) + s.Require().NoError(err, "create user subscription") + return sub +} + // --- Create / GetByID / Update / Delete --- func (s *UserSubscriptionRepoSuite) TestCreate() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "sub-create@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-create"}) + user := s.mustCreateUser("sub-create@test.com", service.RoleUser) + group := s.mustCreateGroup("g-create") sub := &service.UserSubscription{ UserID: user.ID, @@ -54,16 +105,12 @@ func (s *UserSubscriptionRepoSuite) TestCreate() { } func (s *UserSubscriptionRepoSuite) TestGetByID_WithPreloads() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "preload@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-preload"}) - admin := mustCreateUser(s.T(), s.db, &userModel{Email: "admin@test.com", Role: service.RoleAdmin}) + user := s.mustCreateUser("preload@test.com", service.RoleUser) + group := s.mustCreateGroup("g-preload") + admin := s.mustCreateUser("admin@test.com", service.RoleAdmin) - sub := mustCreateSubscription(s.T(), s.db, 
&userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - AssignedBy: &admin.ID, + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetAssignedBy(admin.ID) }) got, err := s.repo.GetByID(s.ctx, sub.ID) @@ -82,18 +129,15 @@ func (s *UserSubscriptionRepoSuite) TestGetByID_NotFound() { } func (s *UserSubscriptionRepoSuite) TestUpdate() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "update@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-update"}) - sub := userSubscriptionModelToService(mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - })) + user := s.mustCreateUser("update@test.com", service.RoleUser) + group := s.mustCreateGroup("g-update") + created := s.mustCreateSubscription(user.ID, group.ID, nil) + + sub, err := s.repo.GetByID(s.ctx, created.ID) + s.Require().NoError(err, "GetByID") sub.Notes = "updated notes" - err := s.repo.Update(s.ctx, sub) - s.Require().NoError(err, "Update") + s.Require().NoError(s.repo.Update(s.ctx, sub), "Update") got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err, "GetByID after update") @@ -101,14 +145,9 @@ func (s *UserSubscriptionRepoSuite) TestUpdate() { } func (s *UserSubscriptionRepoSuite) TestDelete() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "delete@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-delete"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("delete@test.com", service.RoleUser) + group := s.mustCreateGroup("g-delete") + sub := s.mustCreateSubscription(user.ID, group.ID, 
nil) err := s.repo.Delete(s.ctx, sub.ID) s.Require().NoError(err, "Delete") @@ -117,17 +156,16 @@ func (s *UserSubscriptionRepoSuite) TestDelete() { s.Require().Error(err, "expected error after delete") } +func (s *UserSubscriptionRepoSuite) TestDelete_Idempotent() { + s.Require().NoError(s.repo.Delete(s.ctx, 42424242), "Delete should be idempotent") +} + // --- GetByUserIDAndGroupID / GetActiveByUserIDAndGroupID --- func (s *UserSubscriptionRepoSuite) TestGetByUserIDAndGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "byuser@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-byuser"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("byuser@test.com", service.RoleUser) + group := s.mustCreateGroup("g-byuser") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) got, err := s.repo.GetByUserIDAndGroupID(s.ctx, user.ID, group.ID) s.Require().NoError(err, "GetByUserIDAndGroupID") @@ -141,15 +179,11 @@ func (s *UserSubscriptionRepoSuite) TestGetByUserIDAndGroupID_NotFound() { } func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "active@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-active"}) + user := s.mustCreateUser("active@test.com", service.RoleUser) + group := s.mustCreateGroup("g-active") - // Create active subscription (future expiry) - active := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(2 * time.Hour), + active := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(2 * time.Hour)) }) got, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, 
group.ID) @@ -158,15 +192,11 @@ func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID() { } func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID_ExpiredIgnored() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "expired@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-expired"}) + user := s.mustCreateUser("expired@test.com", service.RoleUser) + group := s.mustCreateGroup("g-expired") - // Create expired subscription (past expiry but active status) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(-2 * time.Hour), + s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-2 * time.Hour)) }) _, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, group.ID) @@ -176,21 +206,14 @@ func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID_ExpiredIgnor // --- ListByUserID / ListActiveByUserID --- func (s *UserSubscriptionRepoSuite) TestListByUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listby@test.com"}) - g1 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-list1"}) - g2 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-list2"}) + user := s.mustCreateUser("listby@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-list1") + g2 := s.mustCreateGroup("g-list2") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g1.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g2.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-24 * time.Hour), + s.mustCreateSubscription(user.ID, g1.ID, nil) + s.mustCreateSubscription(user.ID, g2.ID, func(c *dbent.UserSubscriptionCreate) { + 
c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) subs, err := s.repo.ListByUserID(s.ctx, user.ID) @@ -202,21 +225,16 @@ func (s *UserSubscriptionRepoSuite) TestListByUserID() { } func (s *UserSubscriptionRepoSuite) TestListActiveByUserID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listactive@test.com"}) - g1 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-act1"}) - g2 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-act2"}) + user := s.mustCreateUser("listactive@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-act1") + g2 := s.mustCreateGroup("g-act2") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g1.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), + s.mustCreateSubscription(user.ID, g1.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g2.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-24 * time.Hour), + s.mustCreateSubscription(user.ID, g2.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) subs, err := s.repo.ListActiveByUserID(s.ctx, user.ID) @@ -228,22 +246,12 @@ func (s *UserSubscriptionRepoSuite) TestListActiveByUserID() { // --- ListByGroupID --- func (s *UserSubscriptionRepoSuite) TestListByGroupID() { - user1 := mustCreateUser(s.T(), s.db, &userModel{Email: "u1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "u2@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-listgrp"}) + user1 := s.mustCreateUser("u1@test.com", service.RoleUser) + user2 := s.mustCreateUser("u2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-listgrp") - mustCreateSubscription(s.T(), s.db, 
&userSubscriptionModel{ - UserID: user1.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user2.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) subs, page, err := s.repo.ListByGroupID(s.ctx, group.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) s.Require().NoError(err, "ListByGroupID") @@ -258,15 +266,9 @@ func (s *UserSubscriptionRepoSuite) TestListByGroupID() { // --- List with filters --- func (s *UserSubscriptionRepoSuite) TestList_NoFilters() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "list@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-list"}) - - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("list@test.com", service.RoleUser) + group := s.mustCreateGroup("g-list") + s.mustCreateSubscription(user.ID, group.ID, nil) subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "") s.Require().NoError(err, "List") @@ -275,22 +277,12 @@ func (s *UserSubscriptionRepoSuite) TestList_NoFilters() { } func (s *UserSubscriptionRepoSuite) TestList_FilterByUserID() { - user1 := mustCreateUser(s.T(), s.db, &userModel{Email: "filter1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "filter2@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-filter"}) + user1 := s.mustCreateUser("filter1@test.com", service.RoleUser) + user2 := s.mustCreateUser("filter2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-filter") - mustCreateSubscription(s.T(), s.db, 
&userSubscriptionModel{ - UserID: user1.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user2.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "") s.Require().NoError(err) @@ -299,22 +291,12 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByUserID() { } func (s *UserSubscriptionRepoSuite) TestList_FilterByGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "grpfilter@test.com"}) - g1 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-f1"}) - g2 := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-f2"}) + user := s.mustCreateUser("grpfilter@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-f1") + g2 := s.mustCreateGroup("g-f2") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g1.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: g2.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + s.mustCreateSubscription(user.ID, g1.ID, nil) + s.mustCreateSubscription(user.ID, g2.ID, nil) subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "") s.Require().NoError(err) @@ -323,20 +305,18 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByGroupID() { } func (s *UserSubscriptionRepoSuite) TestList_FilterByStatus() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "statfilter@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-stat"}) 
+ user1 := s.mustCreateUser("statfilter1@test.com", service.RoleUser) + user2 := s.mustCreateUser("statfilter2@test.com", service.RoleUser) + group1 := s.mustCreateGroup("g-stat-1") + group2 := s.mustCreateGroup("g-stat-2") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), + s.mustCreateSubscription(user1.ID, group1.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusActive) + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-24 * time.Hour), + s.mustCreateSubscription(user2.ID, group2.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired) @@ -348,52 +328,37 @@ func (s *UserSubscriptionRepoSuite) TestList_FilterByStatus() { // --- Usage tracking --- func (s *UserSubscriptionRepoSuite) TestIncrementUsage() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "usage@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-usage"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("usage@test.com", service.RoleUser) + group := s.mustCreateGroup("g-usage") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) err := s.repo.IncrementUsage(s.ctx, sub.ID, 1.25) s.Require().NoError(err, "IncrementUsage") got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().Equal(1.25, 
got.DailyUsageUSD) - s.Require().Equal(1.25, got.WeeklyUsageUSD) - s.Require().Equal(1.25, got.MonthlyUsageUSD) + s.Require().InDelta(1.25, got.DailyUsageUSD, 1e-6) + s.Require().InDelta(1.25, got.WeeklyUsageUSD, 1e-6) + s.Require().InDelta(1.25, got.MonthlyUsageUSD, 1e-6) } func (s *UserSubscriptionRepoSuite) TestIncrementUsage_Accumulates() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "accum@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-accum"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("accum@test.com", service.RoleUser) + group := s.mustCreateGroup("g-accum") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) s.Require().NoError(s.repo.IncrementUsage(s.ctx, sub.ID, 1.0)) s.Require().NoError(s.repo.IncrementUsage(s.ctx, sub.ID, 2.5)) got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().Equal(3.5, got.DailyUsageUSD) + s.Require().InDelta(3.5, got.DailyUsageUSD, 1e-6) } func (s *UserSubscriptionRepoSuite) TestActivateWindows() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "activate@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-activate"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("activate@test.com", service.RoleUser) + group := s.mustCreateGroup("g-activate") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) activateAt := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) err := s.repo.ActivateWindows(s.ctx, sub.ID, activateAt) @@ -404,19 +369,15 @@ func (s *UserSubscriptionRepoSuite) TestActivateWindows() { s.Require().NotNil(got.DailyWindowStart) 
s.Require().NotNil(got.WeeklyWindowStart) s.Require().NotNil(got.MonthlyWindowStart) - s.Require().True(got.DailyWindowStart.Equal(activateAt)) + s.Require().WithinDuration(activateAt, *got.DailyWindowStart, time.Microsecond) } func (s *UserSubscriptionRepoSuite) TestResetDailyUsage() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "resetd@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-resetd"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - DailyUsageUSD: 10.0, - WeeklyUsageUSD: 20.0, + user := s.mustCreateUser("resetd@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetd") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetDailyUsageUsd(10.0) + c.SetWeeklyUsageUsd(20.0) }) resetAt := time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC) @@ -425,21 +386,18 @@ func (s *UserSubscriptionRepoSuite) TestResetDailyUsage() { got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().Zero(got.DailyUsageUSD) - s.Require().Equal(20.0, got.WeeklyUsageUSD, "weekly should remain unchanged") - s.Require().True(got.DailyWindowStart.Equal(resetAt)) + s.Require().InDelta(0.0, got.DailyUsageUSD, 1e-6) + s.Require().InDelta(20.0, got.WeeklyUsageUSD, 1e-6) + s.Require().NotNil(got.DailyWindowStart) + s.Require().WithinDuration(resetAt, *got.DailyWindowStart, time.Microsecond) } func (s *UserSubscriptionRepoSuite) TestResetWeeklyUsage() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "resetw@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-resetw"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - WeeklyUsageUSD: 15.0, - MonthlyUsageUSD: 30.0, 
+ user := s.mustCreateUser("resetw@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetw") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetWeeklyUsageUsd(15.0) + c.SetMonthlyUsageUsd(30.0) }) resetAt := time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC) @@ -448,20 +406,17 @@ func (s *UserSubscriptionRepoSuite) TestResetWeeklyUsage() { got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().Zero(got.WeeklyUsageUSD) - s.Require().Equal(30.0, got.MonthlyUsageUSD, "monthly should remain unchanged") - s.Require().True(got.WeeklyWindowStart.Equal(resetAt)) + s.Require().InDelta(0.0, got.WeeklyUsageUSD, 1e-6) + s.Require().InDelta(30.0, got.MonthlyUsageUSD, 1e-6) + s.Require().NotNil(got.WeeklyWindowStart) + s.Require().WithinDuration(resetAt, *got.WeeklyWindowStart, time.Microsecond) } func (s *UserSubscriptionRepoSuite) TestResetMonthlyUsage() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "resetm@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-resetm"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - MonthlyUsageUSD: 100.0, + user := s.mustCreateUser("resetm@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetm") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetMonthlyUsageUsd(25.0) }) resetAt := time.Date(2025, 2, 1, 0, 0, 0, 0, time.UTC) @@ -470,21 +425,17 @@ func (s *UserSubscriptionRepoSuite) TestResetMonthlyUsage() { got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().Zero(got.MonthlyUsageUSD) - s.Require().True(got.MonthlyWindowStart.Equal(resetAt)) + s.Require().InDelta(0.0, got.MonthlyUsageUSD, 1e-6) + s.Require().NotNil(got.MonthlyWindowStart) + s.Require().WithinDuration(resetAt, 
*got.MonthlyWindowStart, time.Microsecond) } // --- UpdateStatus / ExtendExpiry / UpdateNotes --- func (s *UserSubscriptionRepoSuite) TestUpdateStatus() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "status@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-status"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("status@test.com", service.RoleUser) + group := s.mustCreateGroup("g-status") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) err := s.repo.UpdateStatus(s.ctx, sub.ID, service.SubscriptionStatusExpired) s.Require().NoError(err, "UpdateStatus") @@ -495,14 +446,9 @@ func (s *UserSubscriptionRepoSuite) TestUpdateStatus() { } func (s *UserSubscriptionRepoSuite) TestExtendExpiry() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "extend@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-extend"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("extend@test.com", service.RoleUser) + group := s.mustCreateGroup("g-extend") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) newExpiry := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) err := s.repo.ExtendExpiry(s.ctx, sub.ID, newExpiry) @@ -510,18 +456,13 @@ func (s *UserSubscriptionRepoSuite) TestExtendExpiry() { got, err := s.repo.GetByID(s.ctx, sub.ID) s.Require().NoError(err) - s.Require().True(got.ExpiresAt.Equal(newExpiry)) + s.Require().WithinDuration(newExpiry, got.ExpiresAt, time.Microsecond) } func (s *UserSubscriptionRepoSuite) TestUpdateNotes() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "notes@test.com"}) - group := mustCreateGroup(s.T(), s.db, 
&groupModel{Name: "g-notes"}) - sub := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + user := s.mustCreateUser("notes@test.com", service.RoleUser) + group := s.mustCreateGroup("g-notes") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) err := s.repo.UpdateNotes(s.ctx, sub.ID, "VIP user") s.Require().NoError(err, "UpdateNotes") @@ -534,20 +475,15 @@ func (s *UserSubscriptionRepoSuite) TestUpdateNotes() { // --- ListExpired / BatchUpdateExpiredStatus --- func (s *UserSubscriptionRepoSuite) TestListExpired() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "listexp@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-listexp"}) + user := s.mustCreateUser("listexp@test.com", service.RoleUser) + groupActive := s.mustCreateGroup("g-listexp-active") + groupExpired := s.mustCreateGroup("g-listexp-expired") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), + s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(-24 * time.Hour), + s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) expired, err := s.repo.ListExpired(s.ctx) @@ -556,20 +492,15 @@ func (s *UserSubscriptionRepoSuite) TestListExpired() { } func (s *UserSubscriptionRepoSuite) TestBatchUpdateExpiredStatus() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "batch@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: 
"g-batch"}) + user := s.mustCreateUser("batch@test.com", service.RoleUser) + groupFuture := s.mustCreateGroup("g-batch-future") + groupPast := s.mustCreateGroup("g-batch-past") - active := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), + active := s.mustCreateSubscription(user.ID, groupFuture.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) }) - expiredActive := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(-24 * time.Hour), + expiredActive := s.mustCreateSubscription(user.ID, groupPast.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) affected, err := s.repo.BatchUpdateExpiredStatus(s.ctx) @@ -586,15 +517,10 @@ func (s *UserSubscriptionRepoSuite) TestBatchUpdateExpiredStatus() { // --- ExistsByUserIDAndGroupID --- func (s *UserSubscriptionRepoSuite) TestExistsByUserIDAndGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "exists@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-exists"}) + user := s.mustCreateUser("exists@test.com", service.RoleUser) + group := s.mustCreateGroup("g-exists") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) + s.mustCreateSubscription(user.ID, group.ID, nil) exists, err := s.repo.ExistsByUserIDAndGroupID(s.ctx, user.ID, group.ID) s.Require().NoError(err, "ExistsByUserIDAndGroupID") @@ -608,21 +534,14 @@ func (s *UserSubscriptionRepoSuite) TestExistsByUserIDAndGroupID() { // --- CountByGroupID / CountActiveByGroupID --- func (s *UserSubscriptionRepoSuite) TestCountByGroupID() { - user1 := 
mustCreateUser(s.T(), s.db, &userModel{Email: "cnt1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "cnt2@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-count"}) + user1 := s.mustCreateUser("cnt1@test.com", service.RoleUser) + user2 := s.mustCreateUser("cnt2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-count") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user1.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user2.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-24 * time.Hour), + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) }) count, err := s.repo.CountByGroupID(s.ctx, group.ID) @@ -631,21 +550,15 @@ func (s *UserSubscriptionRepoSuite) TestCountByGroupID() { } func (s *UserSubscriptionRepoSuite) TestCountActiveByGroupID() { - user1 := mustCreateUser(s.T(), s.db, &userModel{Email: "cntact1@test.com"}) - user2 := mustCreateUser(s.T(), s.db, &userModel{Email: "cntact2@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-cntact"}) + user1 := s.mustCreateUser("cntact1@test.com", service.RoleUser) + user2 := s.mustCreateUser("cntact2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-cntact") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user1.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), + s.mustCreateSubscription(user1.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) }) - mustCreateSubscription(s.T(), s.db, 
&userSubscriptionModel{ - UserID: user2.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(-24 * time.Hour), // expired by time + s.mustCreateSubscription(user2.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) // expired by time }) count, err := s.repo.CountActiveByGroupID(s.ctx, group.ID) @@ -656,21 +569,12 @@ func (s *UserSubscriptionRepoSuite) TestCountActiveByGroupID() { // --- DeleteByGroupID --- func (s *UserSubscriptionRepoSuite) TestDeleteByGroupID() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "delgrp@test.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-delgrp"}) + user1 := s.mustCreateUser("delgrp1@test.com", service.RoleUser) + user2 := s.mustCreateUser("delgrp2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-delgrp") - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusExpired, - ExpiresAt: time.Now().Add(-24 * time.Hour), - }) + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) affected, err := s.repo.DeleteByGroupID(s.ctx, group.ID) s.Require().NoError(err, "DeleteByGroupID") @@ -680,26 +584,21 @@ func (s *UserSubscriptionRepoSuite) TestDeleteByGroupID() { s.Require().Zero(count) } -// --- Combined original test --- +// --- Combined scenario --- func (s *UserSubscriptionRepoSuite) TestActiveExpiredBoundaries_UsageAndReset_BatchUpdateExpiredStatus() { - user := mustCreateUser(s.T(), s.db, &userModel{Email: "subr@example.com"}) - group := mustCreateGroup(s.T(), s.db, &groupModel{Name: "g-subr"}) + user := s.mustCreateUser("subr@example.com", service.RoleUser) + groupActive := 
s.mustCreateGroup("g-subr-active") + groupExpired := s.mustCreateGroup("g-subr-expired") - active := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(2 * time.Hour), + active := s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(2 * time.Hour)) }) - expiredActive := mustCreateSubscription(s.T(), s.db, &userSubscriptionModel{ - UserID: user.ID, - GroupID: group.ID, - Status: service.SubscriptionStatusActive, - ExpiresAt: time.Now().Add(-2 * time.Hour), + expiredActive := s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-2 * time.Hour)) }) - got, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, group.ID) + got, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, groupActive.ID) s.Require().NoError(err, "GetActiveByUserIDAndGroupID") s.Require().Equal(active.ID, got.ID, "expected active subscription") @@ -709,9 +608,9 @@ func (s *UserSubscriptionRepoSuite) TestActiveExpiredBoundaries_UsageAndReset_Ba after, err := s.repo.GetByID(s.ctx, active.ID) s.Require().NoError(err, "GetByID") - s.Require().Equal(1.25, after.DailyUsageUSD, "DailyUsageUSD mismatch") - s.Require().Equal(1.25, after.WeeklyUsageUSD, "WeeklyUsageUSD mismatch") - s.Require().Equal(1.25, after.MonthlyUsageUSD, "MonthlyUsageUSD mismatch") + s.Require().InDelta(1.25, after.DailyUsageUSD, 1e-6) + s.Require().InDelta(1.25, after.WeeklyUsageUSD, 1e-6) + s.Require().InDelta(1.25, after.MonthlyUsageUSD, 1e-6) s.Require().NotNil(after.DailyWindowStart, "expected DailyWindowStart activated") s.Require().NotNil(after.WeeklyWindowStart, "expected WeeklyWindowStart activated") s.Require().NotNil(after.MonthlyWindowStart, "expected MonthlyWindowStart activated") @@ -720,14 +619,16 @@ func (s *UserSubscriptionRepoSuite) 
TestActiveExpiredBoundaries_UsageAndReset_Ba s.Require().NoError(s.repo.ResetDailyUsage(s.ctx, active.ID, resetAt), "ResetDailyUsage") afterReset, err := s.repo.GetByID(s.ctx, active.ID) s.Require().NoError(err, "GetByID after reset") - s.Require().Equal(0.0, afterReset.DailyUsageUSD, "expected daily usage reset to 0") - s.Require().NotNil(afterReset.DailyWindowStart, "expected DailyWindowStart not nil") - s.Require().True(afterReset.DailyWindowStart.Equal(resetAt), "expected daily window start updated") + s.Require().InDelta(0.0, afterReset.DailyUsageUSD, 1e-6) + s.Require().NotNil(afterReset.DailyWindowStart) + s.Require().WithinDuration(resetAt, *afterReset.DailyWindowStart, time.Microsecond) affected, err := s.repo.BatchUpdateExpiredStatus(s.ctx) s.Require().NoError(err, "BatchUpdateExpiredStatus") s.Require().Equal(int64(1), affected, "expected 1 affected row") + updated, err := s.repo.GetByID(s.ctx, expiredActive.ID) s.Require().NoError(err, "GetByID expired") s.Require().Equal(service.SubscriptionStatusExpired, updated.Status, "expected status expired") } + diff --git a/backend/internal/server/middleware/api_key_auth_google_test.go b/backend/internal/server/middleware/api_key_auth_google_test.go new file mode 100644 index 00000000..6f3ade9e --- /dev/null +++ b/backend/internal/server/middleware/api_key_auth_google_test.go @@ -0,0 +1,214 @@ +package middleware + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type fakeApiKeyRepo struct { + getByKey func(ctx context.Context, key string) (*service.ApiKey, error) +} + +func (f fakeApiKeyRepo) Create(ctx context.Context, key *service.ApiKey) error { return errors.New("not implemented") } +func (f fakeApiKeyRepo) GetByID(ctx 
context.Context, id int64) (*service.ApiKey, error) { + return nil, errors.New("not implemented") +} +func (f fakeApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.ApiKey, error) { + if f.getByKey == nil { + return nil, errors.New("unexpected call") + } + return f.getByKey(ctx, key) +} +func (f fakeApiKeyRepo) Update(ctx context.Context, key *service.ApiKey) error { return errors.New("not implemented") } +func (f fakeApiKeyRepo) Delete(ctx context.Context, id int64) error { return errors.New("not implemented") } +func (f fakeApiKeyRepo) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.ApiKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeApiKeyRepo) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + return nil, errors.New("not implemented") +} +func (f fakeApiKeyRepo) CountByUserID(ctx context.Context, userID int64) (int64, error) { return 0, errors.New("not implemented") } +func (f fakeApiKeyRepo) ExistsByKey(ctx context.Context, key string) (bool, error) { return false, errors.New("not implemented") } +func (f fakeApiKeyRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.ApiKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeApiKeyRepo) SearchApiKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.ApiKey, error) { + return nil, errors.New("not implemented") +} +func (f fakeApiKeyRepo) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} +func (f fakeApiKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +type googleErrorResponse struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + Status string 
`json:"status"` + } `json:"error"` +} + +func newTestApiKeyService(repo service.ApiKeyRepository) *service.ApiKeyService { + return service.NewApiKeyService( + repo, + nil, // userRepo (unused in GetByKey) + nil, // groupRepo + nil, // userSubRepo + nil, // cache + &config.Config{}, + ) +} + +func TestApiKeyAuthWithSubscriptionGoogle_MissingKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestApiKeyService(fakeApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.ApiKey, error) { + return nil, errors.New("should not be called") + }, + }) + r.Use(ApiKeyAuthWithSubscriptionGoogle(apiKeyService, nil)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "API key is required", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_InvalidKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestApiKeyService(fakeApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.ApiKey, error) { + return nil, service.ErrApiKeyNotFound + }, + }) + r.Use(ApiKeyAuthWithSubscriptionGoogle(apiKeyService, nil)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer invalid") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, 
http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "Invalid API key", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_RepoError(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestApiKeyService(fakeApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.ApiKey, error) { + return nil, errors.New("db down") + }, + }) + r.Use(ApiKeyAuthWithSubscriptionGoogle(apiKeyService, nil)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer any") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusInternalServerError, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusInternalServerError, resp.Error.Code) + require.Equal(t, "Failed to validate API key", resp.Error.Message) + require.Equal(t, "INTERNAL", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_DisabledKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestApiKeyService(fakeApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.ApiKey, error) { + return &service.ApiKey{ + ID: 1, + Key: key, + Status: service.StatusDisabled, + User: &service.User{ + ID: 123, + Status: service.StatusActive, + }, + }, nil + }, + }) + r.Use(ApiKeyAuthWithSubscriptionGoogle(apiKeyService, nil)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer disabled") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, 
json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "API key is disabled", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_InsufficientBalance(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestApiKeyService(fakeApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.ApiKey, error) { + return &service.ApiKey{ + ID: 1, + Key: key, + Status: service.StatusActive, + User: &service.User{ + ID: 123, + Status: service.StatusActive, + Balance: 0, + }, + }, nil + }, + }) + r.Use(ApiKeyAuthWithSubscriptionGoogle(apiKeyService, nil)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer ok") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusForbidden, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusForbidden, resp.Error.Code) + require.Equal(t, "Insufficient account balance", resp.Error.Message) + require.Equal(t, "PERMISSION_DENIED", resp.Error.Status) +} diff --git a/backend/internal/setup/setup.go b/backend/internal/setup/setup.go index 387077bb..759b930c 100644 --- a/backend/internal/setup/setup.go +++ b/backend/internal/setup/setup.go @@ -3,6 +3,7 @@ package setup import ( "context" "crypto/rand" + "database/sql" "encoding/hex" "fmt" "log" @@ -10,13 +11,12 @@ import ( "strconv" "time" - "github.com/Wei-Shaw/sub2api/internal/repository" + "github.com/Wei-Shaw/sub2api/internal/infrastructure" "github.com/Wei-Shaw/sub2api/internal/service" + _ "github.com/lib/pq" "github.com/redis/go-redis/v9" "gopkg.in/yaml.v3" - "gorm.io/driver/postgres" - "gorm.io/gorm" ) // Config paths @@ -92,20 +92,16 @@ func 
TestDatabaseConnection(cfg *DatabaseConfig) error { cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.SSLMode, ) - db, err := gorm.Open(postgres.Open(defaultDSN), &gorm.Config{}) + db, err := sql.Open("postgres", defaultDSN) if err != nil { return fmt.Errorf("failed to connect to PostgreSQL: %w", err) } - sqlDB, err := db.DB() - if err != nil { - return fmt.Errorf("failed to get db instance: %w", err) - } defer func() { - if sqlDB == nil { + if db == nil { return } - if err := sqlDB.Close(); err != nil { + if err := db.Close(); err != nil { log.Printf("failed to close postgres connection: %v", err) } }() @@ -113,22 +109,23 @@ func TestDatabaseConnection(cfg *DatabaseConfig) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - if err := sqlDB.PingContext(ctx); err != nil { + if err := db.PingContext(ctx); err != nil { return fmt.Errorf("ping failed: %w", err) } // Check if target database exists var exists bool - row := sqlDB.QueryRowContext(ctx, "SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = $1)", cfg.DBName) + row := db.QueryRowContext(ctx, "SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = $1)", cfg.DBName) if err := row.Scan(&exists); err != nil { return fmt.Errorf("failed to check database existence: %w", err) } // Create database if not exists if !exists { + // 注意:数据库名不能参数化,依赖前置输入校验保障安全。 // Note: Database names cannot be parameterized, but we've already validated cfg.DBName // in the handler using validateDBName() which only allows [a-zA-Z][a-zA-Z0-9_]* - _, err := sqlDB.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s", cfg.DBName)) + _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s", cfg.DBName)) if err != nil { return fmt.Errorf("failed to create database '%s': %w", cfg.DBName, err) } @@ -136,27 +133,23 @@ func TestDatabaseConnection(cfg *DatabaseConfig) error { } // Now connect to the target database to verify - if err := sqlDB.Close(); err != nil { + if err := db.Close(); err != nil { 
log.Printf("failed to close postgres connection: %v", err) } - sqlDB = nil + db = nil targetDSN := fmt.Sprintf( "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s", cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.DBName, cfg.SSLMode, ) - targetDB, err := gorm.Open(postgres.Open(targetDSN), &gorm.Config{}) + targetDB, err := sql.Open("postgres", targetDSN) if err != nil { return fmt.Errorf("failed to connect to database '%s': %w", cfg.DBName, err) } - targetSqlDB, err := targetDB.DB() - if err != nil { - return fmt.Errorf("failed to get target db instance: %w", err) - } defer func() { - if err := targetSqlDB.Close(); err != nil { + if err := targetDB.Close(); err != nil { log.Printf("failed to close postgres connection: %v", err) } }() @@ -164,7 +157,7 @@ func TestDatabaseConnection(cfg *DatabaseConfig) error { ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel2() - if err := targetSqlDB.PingContext(ctx2); err != nil { + if err := targetDB.PingContext(ctx2); err != nil { return fmt.Errorf("ping target database failed: %w", err) } @@ -256,22 +249,18 @@ func initializeDatabase(cfg *SetupConfig) error { cfg.Database.Password, cfg.Database.DBName, cfg.Database.SSLMode, ) - db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{}) + db, err := sql.Open("postgres", dsn) if err != nil { return err } - sqlDB, err := db.DB() - if err != nil { - return err - } defer func() { - if err := sqlDB.Close(); err != nil { + if err := db.Close(); err != nil { log.Printf("failed to close postgres connection: %v", err) } }() - return repository.AutoMigrate(db) + return infrastructure.ApplyMigrations(context.Background(), db) } func createAdminUser(cfg *SetupConfig) error { @@ -281,24 +270,24 @@ func createAdminUser(cfg *SetupConfig) error { cfg.Database.Password, cfg.Database.DBName, cfg.Database.SSLMode, ) - db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{}) + db, err := sql.Open("postgres", dsn) if err != nil { return err } - sqlDB, err 
:= db.DB() - if err != nil { - return err - } defer func() { - if err := sqlDB.Close(); err != nil { + if err := db.Close(); err != nil { log.Printf("failed to close postgres connection: %v", err) } }() + // 使用超时上下文避免安装流程因数据库异常而长时间阻塞。 + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + // Check if admin already exists var count int64 - if err := db.Table("users").Where("role = ?", service.RoleAdmin).Count(&count).Error; err != nil { + if err := db.QueryRowContext(ctx, "SELECT COUNT(1) FROM users WHERE role = $1", service.RoleAdmin).Scan(&count); err != nil { return err } if count > 0 { @@ -319,7 +308,20 @@ func createAdminUser(cfg *SetupConfig) error { return err } - return repository.NewUserRepository(db).Create(context.Background(), admin) + _, err = db.ExecContext( + ctx, + `INSERT INTO users (email, password_hash, role, balance, concurrency, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, + admin.Email, + admin.PasswordHash, + admin.Role, + admin.Balance, + admin.Concurrency, + admin.Status, + admin.CreatedAt, + admin.UpdatedAt, + ) + return err } func writeConfigFile(cfg *SetupConfig) error { @@ -339,7 +341,10 @@ func writeConfigFile(cfg *SetupConfig) error { ExpireHour int `yaml:"expire_hour"` } `yaml:"jwt"` Default struct { - GroupID uint `yaml:"group_id"` + UserConcurrency int `yaml:"user_concurrency"` + UserBalance float64 `yaml:"user_balance"` + ApiKeyPrefix string `yaml:"api_key_prefix"` + RateMultiplier float64 `yaml:"rate_multiplier"` } `yaml:"default"` RateLimit struct { RequestsPerMinute int `yaml:"requests_per_minute"` @@ -358,9 +363,15 @@ func writeConfigFile(cfg *SetupConfig) error { ExpireHour: cfg.JWT.ExpireHour, }, Default: struct { - GroupID uint `yaml:"group_id"` + UserConcurrency int `yaml:"user_concurrency"` + UserBalance float64 `yaml:"user_balance"` + ApiKeyPrefix string `yaml:"api_key_prefix"` + RateMultiplier float64 `yaml:"rate_multiplier"` }{ - GroupID: 1, + 
UserConcurrency: 5, + UserBalance: 0, + ApiKeyPrefix: "sk-", + RateMultiplier: 1.0, }, RateLimit: struct { RequestsPerMinute int `yaml:"requests_per_minute"` diff --git a/backend/migrations/001_init.sql b/backend/migrations/001_init.sql index fea194c0..64078c42 100644 --- a/backend/migrations/001_init.sql +++ b/backend/migrations/001_init.sql @@ -170,14 +170,3 @@ CREATE INDEX IF NOT EXISTS idx_usage_logs_account_id ON usage_logs(account_id); CREATE INDEX IF NOT EXISTS idx_usage_logs_model ON usage_logs(model); CREATE INDEX IF NOT EXISTS idx_usage_logs_created_at ON usage_logs(created_at); CREATE INDEX IF NOT EXISTS idx_usage_logs_user_created ON usage_logs(user_id, created_at); - --- 插入默认管理员用户 --- 密码: admin123 (bcrypt hash) -INSERT INTO users (email, password_hash, role, balance, concurrency, status) -VALUES ('admin@sub2api.com', '$2a$10$N9qo8uLOickgx2ZMRZoMye.IjJbDdJeCo0U2bBPJj9lS/5LqD.C.C', 'admin', 0, 10, 'active') -ON CONFLICT (email) DO NOTHING; - --- 插入默认分组 -INSERT INTO groups (name, description, rate_multiplier, is_exclusive, status) -VALUES ('default', '默认分组', 1.0, false, 'active') -ON CONFLICT (name) DO NOTHING; diff --git a/backend/migrations/005_schema_parity.sql b/backend/migrations/005_schema_parity.sql new file mode 100644 index 00000000..0ee3f121 --- /dev/null +++ b/backend/migrations/005_schema_parity.sql @@ -0,0 +1,42 @@ +-- Align SQL migrations with current GORM persistence models. +-- This file is designed to be safe on both fresh installs and existing databases. 
+ +-- users: add fields added after initial migration +ALTER TABLE users ADD COLUMN IF NOT EXISTS username VARCHAR(100) NOT NULL DEFAULT ''; +ALTER TABLE users ADD COLUMN IF NOT EXISTS wechat VARCHAR(100) NOT NULL DEFAULT ''; +ALTER TABLE users ADD COLUMN IF NOT EXISTS notes TEXT NOT NULL DEFAULT ''; + +-- api_keys: allow longer keys (GORM model uses size:128) +ALTER TABLE api_keys ALTER COLUMN key TYPE VARCHAR(128); + +-- accounts: scheduling and rate-limit fields used by repository queries +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS schedulable BOOLEAN NOT NULL DEFAULT TRUE; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limited_at TIMESTAMPTZ; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limit_reset_at TIMESTAMPTZ; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS overload_until TIMESTAMPTZ; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_start TIMESTAMPTZ; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_end TIMESTAMPTZ; +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_status VARCHAR(20); + +CREATE INDEX IF NOT EXISTS idx_accounts_schedulable ON accounts(schedulable); +CREATE INDEX IF NOT EXISTS idx_accounts_rate_limited_at ON accounts(rate_limited_at); +CREATE INDEX IF NOT EXISTS idx_accounts_rate_limit_reset_at ON accounts(rate_limit_reset_at); +CREATE INDEX IF NOT EXISTS idx_accounts_overload_until ON accounts(overload_until); + +-- redeem_codes: subscription redeem fields +ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL; +ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS validity_days INT NOT NULL DEFAULT 30; +CREATE INDEX IF NOT EXISTS idx_redeem_codes_group_id ON redeem_codes(group_id); + +-- usage_logs: billing type used by filters and stats +ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS billing_type SMALLINT NOT NULL DEFAULT 0; +CREATE INDEX IF NOT EXISTS idx_usage_logs_billing_type ON usage_logs(billing_type); + +-- settings: key-value store 
+CREATE TABLE IF NOT EXISTS settings ( + id BIGSERIAL PRIMARY KEY, + key VARCHAR(100) NOT NULL UNIQUE, + value TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + diff --git a/backend/migrations/006_fix_invalid_subscription_expires_at.sql b/backend/migrations/006_fix_invalid_subscription_expires_at.sql new file mode 100644 index 00000000..7a0c2642 --- /dev/null +++ b/backend/migrations/006_fix_invalid_subscription_expires_at.sql @@ -0,0 +1,10 @@ +-- Fix legacy subscription records with invalid expires_at (year > 2099). +DO $$ +BEGIN + IF to_regclass('public.user_subscriptions') IS NOT NULL THEN + UPDATE user_subscriptions + SET expires_at = TIMESTAMPTZ '2099-12-31 23:59:59+00' + WHERE expires_at > TIMESTAMPTZ '2099-12-31 23:59:59+00'; + END IF; +END $$; + diff --git a/backend/migrations/007_add_user_allowed_groups.sql b/backend/migrations/007_add_user_allowed_groups.sql new file mode 100644 index 00000000..a61400d2 --- /dev/null +++ b/backend/migrations/007_add_user_allowed_groups.sql @@ -0,0 +1,20 @@ +-- Add user_allowed_groups join table to replace users.allowed_groups (BIGINT[]). +-- Phase 1: create table + backfill from the legacy array column. + +CREATE TABLE IF NOT EXISTS user_allowed_groups ( + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, group_id) +); + +CREATE INDEX IF NOT EXISTS idx_user_allowed_groups_group_id ON user_allowed_groups(group_id); + +-- Backfill from the legacy users.allowed_groups array. 
+INSERT INTO user_allowed_groups (user_id, group_id) +SELECT u.id, x.group_id +FROM users u +CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) +JOIN groups g ON g.id = x.group_id +WHERE u.allowed_groups IS NOT NULL +ON CONFLICT DO NOTHING; diff --git a/backend/migrations/migrations.go b/backend/migrations/migrations.go new file mode 100644 index 00000000..3cab7b03 --- /dev/null +++ b/backend/migrations/migrations.go @@ -0,0 +1,34 @@ +// Package migrations 包含嵌入的 SQL 数据库迁移文件。 +// +// 该包使用 Go 1.16+ 的 embed 功能将 SQL 文件嵌入到编译后的二进制文件中。 +// 这种方式的优点: +// - 部署时无需额外的迁移文件 +// - 迁移文件与代码版本一致 +// - 便于版本控制和代码审查 +package migrations + +import "embed" + +// FS 包含本目录下所有嵌入的 SQL 迁移文件。 +// +// 迁移命名规范: +// - 使用零填充的数字前缀确保正确的执行顺序 +// - 格式:NNN_description.sql(如 001_init.sql, 002_add_users.sql) +// - 描述部分使用下划线分隔的小写单词 +// +// 迁移文件要求: +// - 必须是幂等的(可重复执行而不产生错误) +// - 推荐使用 IF NOT EXISTS / IF EXISTS 语法 +// - 一旦应用,不应修改已有的迁移文件(通过 checksum 校验) +// +// 示例迁移文件: +// +// -- 001_init.sql +// CREATE TABLE IF NOT EXISTS users ( +// id BIGSERIAL PRIMARY KEY, +// email VARCHAR(255) NOT NULL UNIQUE, +// created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +// ); +// +//go:embed *.sql +var FS embed.FS diff --git a/backend/tools.go b/backend/tools.go index fc19d5ce..f06d2c78 100644 --- a/backend/tools.go +++ b/backend/tools.go @@ -4,5 +4,6 @@ package tools import ( + _ "entgo.io/ent/cmd/ent" _ "github.com/google/wire/cmd/wire" ) diff --git a/deploy/README.md b/deploy/README.md index 86f88f19..24b6d067 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -51,7 +51,7 @@ When using Docker Compose with `AUTO_SETUP=true`: 1. 
On first run, the system automatically: - Connects to PostgreSQL and Redis - - Creates all database tables + - Applies database migrations (SQL files in `backend/migrations/*.sql`) and records them in `schema_migrations` - Generates JWT secret (if not provided) - Creates admin account (password auto-generated if not provided) - Writes config.yaml @@ -63,6 +63,30 @@ When using Docker Compose with `AUTO_SETUP=true`: docker-compose logs sub2api | grep "admin password" ``` +### Database Migration Notes (PostgreSQL) + +- Migrations are applied in lexicographic order (e.g. `001_...sql`, `002_...sql`). +- `schema_migrations` tracks applied migrations (filename + checksum). +- Migrations are forward-only; rollback requires a DB backup restore or a manual compensating SQL script. + +**Verify `users.allowed_groups` → `user_allowed_groups` backfill** + +During the incremental GORM→Ent migration, `users.allowed_groups` (legacy `BIGINT[]`) is being replaced by a normalized join table `user_allowed_groups(user_id, group_id)`. + +Run this query to compare the legacy data vs the join table: + +```sql +WITH old_pairs AS ( + SELECT DISTINCT u.id AS user_id, x.group_id + FROM users u + CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) + WHERE u.allowed_groups IS NOT NULL +) +SELECT + (SELECT COUNT(*) FROM old_pairs) AS old_pair_count, + (SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count; +``` + ### Commands ```bash diff --git a/openspec/AGENTS.md b/openspec/AGENTS.md new file mode 100644 index 00000000..96ab0bb3 --- /dev/null +++ b/openspec/AGENTS.md @@ -0,0 +1,456 @@ +# OpenSpec Instructions + +Instructions for AI coding assistants using OpenSpec for spec-driven development. 
+ +## TL;DR Quick Checklist + +- Search existing work: `openspec spec list --long`, `openspec list` (use `rg` only for full-text search) +- Decide scope: new capability vs modify existing capability +- Pick a unique `change-id`: kebab-case, verb-led (`add-`, `update-`, `remove-`, `refactor-`) +- Scaffold: `proposal.md`, `tasks.md`, `design.md` (only if needed), and delta specs per affected capability +- Write deltas: use `## ADDED|MODIFIED|REMOVED|RENAMED Requirements`; include at least one `#### Scenario:` per requirement +- Validate: `openspec validate [change-id] --strict` and fix issues +- Request approval: Do not start implementation until proposal is approved + +## Three-Stage Workflow + +### Stage 1: Creating Changes +Create proposal when you need to: +- Add features or functionality +- Make breaking changes (API, schema) +- Change architecture or patterns +- Optimize performance (changes behavior) +- Update security patterns + +Triggers (examples): +- "Help me create a change proposal" +- "Help me plan a change" +- "Help me create a proposal" +- "I want to create a spec proposal" +- "I want to create a spec" + +Loose matching guidance: +- Contains one of: `proposal`, `change`, `spec` +- With one of: `create`, `plan`, `make`, `start`, `help` + +Skip proposal for: +- Bug fixes (restore intended behavior) +- Typos, formatting, comments +- Dependency updates (non-breaking) +- Configuration changes +- Tests for existing behavior + +**Workflow** +1. Review `openspec/project.md`, `openspec list`, and `openspec list --specs` to understand current context. +2. Choose a unique verb-led `change-id` and scaffold `proposal.md`, `tasks.md`, optional `design.md`, and spec deltas under `openspec/changes//`. +3. Draft spec deltas using `## ADDED|MODIFIED|REMOVED Requirements` with at least one `#### Scenario:` per requirement. +4. Run `openspec validate --strict` and resolve any issues before sharing the proposal. 
+ +### Stage 2: Implementing Changes +Track these steps as TODOs and complete them one by one. +1. **Read proposal.md** - Understand what's being built +2. **Read design.md** (if exists) - Review technical decisions +3. **Read tasks.md** - Get implementation checklist +4. **Implement tasks sequentially** - Complete in order +5. **Confirm completion** - Ensure every item in `tasks.md` is finished before updating statuses +6. **Update checklist** - After all work is done, set every task to `- [x]` so the list reflects reality +7. **Approval gate** - Do not start implementation until the proposal is reviewed and approved + +### Stage 3: Archiving Changes +After deployment, create separate PR to: +- Move `changes/[name]/` → `changes/archive/YYYY-MM-DD-[name]/` +- Update `specs/` if capabilities changed +- Use `openspec archive --skip-specs --yes` for tooling-only changes (always pass the change ID explicitly) +- Run `openspec validate --strict` to confirm the archived change passes checks + +## Before Any Task + +**Context Checklist:** +- [ ] Read relevant specs in `specs/[capability]/spec.md` +- [ ] Check pending changes in `changes/` for conflicts +- [ ] Read `openspec/project.md` for conventions +- [ ] Run `openspec list` to see active changes +- [ ] Run `openspec list --specs` to see existing capabilities + +**Before Creating Specs:** +- Always check if capability already exists +- Prefer modifying existing specs over creating duplicates +- Use `openspec show [spec]` to review current state +- If request is ambiguous, ask 1–2 clarifying questions before scaffolding + +### Search Guidance +- Enumerate specs: `openspec spec list --long` (or `--json` for scripts) +- Enumerate changes: `openspec list` (or `openspec change list --json` - deprecated but available) +- Show details: + - Spec: `openspec show --type spec` (use `--json` for filters) + - Change: `openspec show --json --deltas-only` +- Full-text search (use ripgrep): `rg -n "Requirement:|Scenario:" 
openspec/specs` + +## Quick Start + +### CLI Commands + +```bash +# Essential commands +openspec list # List active changes +openspec list --specs # List specifications +openspec show [item] # Display change or spec +openspec validate [item] # Validate changes or specs +openspec archive [--yes|-y] # Archive after deployment (add --yes for non-interactive runs) + +# Project management +openspec init [path] # Initialize OpenSpec +openspec update [path] # Update instruction files + +# Interactive mode +openspec show # Prompts for selection +openspec validate # Bulk validation mode + +# Debugging +openspec show [change] --json --deltas-only +openspec validate [change] --strict +``` + +### Command Flags + +- `--json` - Machine-readable output +- `--type change|spec` - Disambiguate items +- `--strict` - Comprehensive validation +- `--no-interactive` - Disable prompts +- `--skip-specs` - Archive without spec updates +- `--yes`/`-y` - Skip confirmation prompts (non-interactive archive) + +## Directory Structure + +``` +openspec/ +├── project.md # Project conventions +├── specs/ # Current truth - what IS built +│ └── [capability]/ # Single focused capability +│ ├── spec.md # Requirements and scenarios +│ └── design.md # Technical patterns +├── changes/ # Proposals - what SHOULD change +│ ├── [change-name]/ +│ │ ├── proposal.md # Why, what, impact +│ │ ├── tasks.md # Implementation checklist +│ │ ├── design.md # Technical decisions (optional; see criteria) +│ │ └── specs/ # Delta changes +│ │ └── [capability]/ +│ │ └── spec.md # ADDED/MODIFIED/REMOVED +│ └── archive/ # Completed changes +``` + +## Creating Change Proposals + +### Decision Tree + +``` +New request? +├─ Bug fix restoring spec behavior? → Fix directly +├─ Typo/format/comment? → Fix directly +├─ New feature/capability? → Create proposal +├─ Breaking change? → Create proposal +├─ Architecture change? → Create proposal +└─ Unclear? → Create proposal (safer) +``` + +### Proposal Structure + +1. 
**Create directory:** `changes/[change-id]/` (kebab-case, verb-led, unique) + +2. **Write proposal.md:** +```markdown +# Change: [Brief description of change] + +## Why +[1-2 sentences on problem/opportunity] + +## What Changes +- [Bullet list of changes] +- [Mark breaking changes with **BREAKING**] + +## Impact +- Affected specs: [list capabilities] +- Affected code: [key files/systems] +``` + +3. **Create spec deltas:** `specs/[capability]/spec.md` +```markdown +## ADDED Requirements +### Requirement: New Feature +The system SHALL provide... + +#### Scenario: Success case +- **WHEN** user performs action +- **THEN** expected result + +## MODIFIED Requirements +### Requirement: Existing Feature +[Complete modified requirement] + +## REMOVED Requirements +### Requirement: Old Feature +**Reason**: [Why removing] +**Migration**: [How to handle] +``` +If multiple capabilities are affected, create multiple delta files under `changes/[change-id]/specs//spec.md`—one per capability. + +4. **Create tasks.md:** +```markdown +## 1. Implementation +- [ ] 1.1 Create database schema +- [ ] 1.2 Implement API endpoint +- [ ] 1.3 Add frontend component +- [ ] 1.4 Write tests +``` + +5. **Create design.md when needed:** +Create `design.md` if any of the following apply; otherwise omit it: +- Cross-cutting change (multiple services/modules) or a new architectural pattern +- New external dependency or significant data model changes +- Security, performance, or migration complexity +- Ambiguity that benefits from technical decisions before coding + +Minimal `design.md` skeleton: +```markdown +## Context +[Background, constraints, stakeholders] + +## Goals / Non-Goals +- Goals: [...] +- Non-Goals: [...] + +## Decisions +- Decision: [What and why] +- Alternatives considered: [Options + rationale] + +## Risks / Trade-offs +- [Risk] → Mitigation + +## Migration Plan +[Steps, rollback] + +## Open Questions +- [...] 
+``` + +## Spec File Format + +### Critical: Scenario Formatting + +**CORRECT** (use #### headers): +```markdown +#### Scenario: User login success +- **WHEN** valid credentials provided +- **THEN** return JWT token +``` + +**WRONG** (don't use bullets or bold): +```markdown +- **Scenario: User login** ❌ +**Scenario**: User login ❌ +### Scenario: User login ❌ +``` + +Every requirement MUST have at least one scenario. + +### Requirement Wording +- Use SHALL/MUST for normative requirements (avoid should/may unless intentionally non-normative) + +### Delta Operations + +- `## ADDED Requirements` - New capabilities +- `## MODIFIED Requirements` - Changed behavior +- `## REMOVED Requirements` - Deprecated features +- `## RENAMED Requirements` - Name changes + +Headers matched with `trim(header)` - whitespace ignored. + +#### When to use ADDED vs MODIFIED +- ADDED: Introduces a new capability or sub-capability that can stand alone as a requirement. Prefer ADDED when the change is orthogonal (e.g., adding "Slash Command Configuration") rather than altering the semantics of an existing requirement. +- MODIFIED: Changes the behavior, scope, or acceptance criteria of an existing requirement. Always paste the full, updated requirement content (header + all scenarios). The archiver will replace the entire requirement with what you provide here; partial deltas will drop previous details. +- RENAMED: Use when only the name changes. If you also change behavior, use RENAMED (name) plus MODIFIED (content) referencing the new name. + +Common pitfall: Using MODIFIED to add a new concern without including the previous text. This causes loss of detail at archive time. If you aren’t explicitly changing the existing requirement, add a new requirement under ADDED instead. + +Authoring a MODIFIED requirement correctly: +1) Locate the existing requirement in `openspec/specs//spec.md`. +2) Copy the entire requirement block (from `### Requirement: ...` through its scenarios). 
+3) Paste it under `## MODIFIED Requirements` and edit to reflect the new behavior. +4) Ensure the header text matches exactly (whitespace-insensitive) and keep at least one `#### Scenario:`. + +Example for RENAMED: +```markdown +## RENAMED Requirements +- FROM: `### Requirement: Login` +- TO: `### Requirement: User Authentication` +``` + +## Troubleshooting + +### Common Errors + +**"Change must have at least one delta"** +- Check `changes/[name]/specs/` exists with .md files +- Verify files have operation prefixes (## ADDED Requirements) + +**"Requirement must have at least one scenario"** +- Check scenarios use `#### Scenario:` format (4 hashtags) +- Don't use bullet points or bold for scenario headers + +**Silent scenario parsing failures** +- Exact format required: `#### Scenario: Name` +- Debug with: `openspec show [change] --json --deltas-only` + +### Validation Tips + +```bash +# Always use strict mode for comprehensive checks +openspec validate [change] --strict + +# Debug delta parsing +openspec show [change] --json | jq '.deltas' + +# Check specific requirement +openspec show [spec] --json -r 1 +``` + +## Happy Path Script + +```bash +# 1) Explore current state +openspec spec list --long +openspec list +# Optional full-text search: +# rg -n "Requirement:|Scenario:" openspec/specs +# rg -n "^#|Requirement:" openspec/changes + +# 2) Choose change id and scaffold +CHANGE=add-two-factor-auth +mkdir -p openspec/changes/$CHANGE/{specs/auth} +printf "## Why\n...\n\n## What Changes\n- ...\n\n## Impact\n- ...\n" > openspec/changes/$CHANGE/proposal.md +printf "## 1. Implementation\n- [ ] 1.1 ...\n" > openspec/changes/$CHANGE/tasks.md + +# 3) Add deltas (example) +cat > openspec/changes/$CHANGE/specs/auth/spec.md << 'EOF' +## ADDED Requirements +### Requirement: Two-Factor Authentication +Users MUST provide a second factor during login. 
+ +#### Scenario: OTP required +- **WHEN** valid credentials are provided +- **THEN** an OTP challenge is required +EOF + +# 4) Validate +openspec validate $CHANGE --strict +``` + +## Multi-Capability Example + +``` +openspec/changes/add-2fa-notify/ +├── proposal.md +├── tasks.md +└── specs/ + ├── auth/ + │ └── spec.md # ADDED: Two-Factor Authentication + └── notifications/ + └── spec.md # ADDED: OTP email notification +``` + +auth/spec.md +```markdown +## ADDED Requirements +### Requirement: Two-Factor Authentication +... +``` + +notifications/spec.md +```markdown +## ADDED Requirements +### Requirement: OTP Email Notification +... +``` + +## Best Practices + +### Simplicity First +- Default to <100 lines of new code +- Single-file implementations until proven insufficient +- Avoid frameworks without clear justification +- Choose boring, proven patterns + +### Complexity Triggers +Only add complexity with: +- Performance data showing current solution too slow +- Concrete scale requirements (>1000 users, >100MB data) +- Multiple proven use cases requiring abstraction + +### Clear References +- Use `file.ts:42` format for code locations +- Reference specs as `specs/auth/spec.md` +- Link related changes and PRs + +### Capability Naming +- Use verb-noun: `user-auth`, `payment-capture` +- Single purpose per capability +- 10-minute understandability rule +- Split if description needs "AND" + +### Change ID Naming +- Use kebab-case, short and descriptive: `add-two-factor-auth` +- Prefer verb-led prefixes: `add-`, `update-`, `remove-`, `refactor-` +- Ensure uniqueness; if taken, append `-2`, `-3`, etc. + +## Tool Selection Guide + +| Task | Tool | Why | +|------|------|-----| +| Find files by pattern | Glob | Fast pattern matching | +| Search code content | Grep | Optimized regex search | +| Read specific files | Read | Direct file access | +| Explore unknown scope | Task | Multi-step investigation | + +## Error Recovery + +### Change Conflicts +1. 
Run `openspec list` to see active changes +2. Check for overlapping specs +3. Coordinate with change owners +4. Consider combining proposals + +### Validation Failures +1. Run with `--strict` flag +2. Check JSON output for details +3. Verify spec file format +4. Ensure scenarios properly formatted + +### Missing Context +1. Read project.md first +2. Check related specs +3. Review recent archives +4. Ask for clarification + +## Quick Reference + +### Stage Indicators +- `changes/` - Proposed, not yet built +- `specs/` - Built and deployed +- `archive/` - Completed changes + +### File Purposes +- `proposal.md` - Why and what +- `tasks.md` - Implementation steps +- `design.md` - Technical decisions +- `spec.md` - Requirements and behavior + +### CLI Essentials +```bash +openspec list # What's in progress? +openspec show [item] # View details +openspec validate --strict # Is it correct? +openspec archive [--yes|-y] # Mark complete (add --yes for automation) +``` + +Remember: Specs are truth. Changes are proposals. Keep them in sync. 
diff --git a/openspec/changes/migrate-orm-gorm-to-ent/proposal.md b/openspec/changes/migrate-orm-gorm-to-ent/proposal.md new file mode 100644 index 00000000..66f3e189 --- /dev/null +++ b/openspec/changes/migrate-orm-gorm-to-ent/proposal.md @@ -0,0 +1,269 @@ +# Proposal: 将 GORM 迁移至 Ent(保留软删除语义) + +## Change ID +`migrate-orm-gorm-to-ent` + +## 背景 +当前后端(`backend/`)使用 GORM 作为 ORM,仓储层(`backend/internal/repository/*.go`)大量依赖字符串 SQL、`Preload`、`gorm.Expr`、`clause` 等机制。 + +为支持后续从 GORM 迁移到 Ent,本变更首先把“schema 管理”从 GORM AutoMigrate 切换为 **版本化 SQL migrations**(`backend/migrations/*.sql`)+ `schema_migrations` 记录表,避免 ORM 层隐式改表导致的不可审计/不可回滚问题,并确保空库可通过 migrations 重建得到“当前代码可运行”的 schema。 + +项目已明确: +- **生产环境依赖软删除语义**(`deleted_at` 过滤必须默认生效)。 +- 更看重 **类型安全 / 可维护性**(希望减少字符串拼接与运行期错误)。 + +因此,本变更将数据库访问从 GORM 迁移到 Ent(`entgo.io/ent`),并用 Ent 的 **Interceptor + Hook + Mixin** 实现与现有行为一致的软删除默认过滤能力(参考 Ent 官方拦截器文档中的 soft delete 模式)。 + +说明: +- Ent 的拦截器/软删除方案需要在代码生成阶段启用相关 feature(例如 `intercept`),并按 Ent 的要求在入口处引入 `ent/runtime` 以注册 schema hooks/interceptors(避免循环依赖)。 +- 本仓库的 Go module 位于 `backend/go.mod`,因此 Ent 生成代码建议放在 `backend/ent/`(例如 `backend/ent/schema/`),而不是仓库根目录。 + +落地提示: +- 入口处的实际 import 路径应以模块路径为准。以当前仓库为例,若 ent 生成目录为 `backend/ent/`,则 runtime import 形如:`github.com/Wei-Shaw/sub2api/ent/runtime`。 + +## 目标 +1. 用 Ent 替代 GORM,提升查询/更新的类型安全与可维护性。 +2. **保持现有软删除语义**:默认查询不返回软删除记录;支持显式 bypass(例如后台审计/修复任务)。 +3. 将“启动时 AutoMigrate”替换为“可审计、可控的迁移流程”(第一阶段采用 `backend/migrations/*.sql` 在部署阶段执行)。 +4. 
保持 `internal/service` 与 handler 等上层不感知 ORM(继续以 repository interface 为边界)。 + +## 非目标 +- 不重写业务逻辑与对外 API 行为(除必要的错误类型映射外)。 +- 不强行把现有复杂统计 SQL(如 `usage_log_repo.go` 的趋势/CTE/聚合)全部改成 Ent Builder;这类保持 Raw/SQL Builder 更可控。 + +## 关键决策(本提案给出推荐方案) + +### 1) `users.allowed_groups`:从 Postgres array 改为关系表(推荐) +现状:`users.allowed_groups BIGINT[]`,并使用 `ANY()` / `array_remove()`(见 `user_repo.go` / `group_repo.go`)。 + +决策:新增中间表 `user_allowed_groups(user_id, group_id, created_at)`,并建立唯一约束 `(user_id, group_id)`。 + +理由: +- Ent 对 array 需要自定义类型 + 仍大量依赖 raw SQL;可维护性一般。 +- 关系表建模更“Ent-friendly”,查询/权限/过滤更清晰,后续扩展(例如允许来源、备注、有效期)更容易。 + +约束与说明: +- **不建议对该 join 表做软删除**:解绑/移除应为硬删除(否则“重新绑定”与唯一约束会引入额外复杂度)。如需审计,建议写审计日志/事件表。 +- 外键建议 `ON DELETE CASCADE`(删除 user/group 时自动清理绑定关系,语义更接近当前级联清理逻辑)。 + +兼容策略: +- Phase 1:新增表并 **从旧 array 回填**;仓储读取改从新表,写入可短期双写(可选)。 +- Phase 2:灰度确认后移除 `allowed_groups` 列与相关 SQL。 + +### 2) `account_groups`:保持复合主键,使用 Ent Edge Schema(推荐) +现状:`account_groups` 以 `(account_id, group_id)` 复合主键,并附带 `priority/created_at` 等额外字段(见 `account_repo.go`)。 + +决策:**不修改数据库表结构**,在 Ent 中将其建模为 Edge Schema(带额外字段的 M2M join entity),并将其标识符配置为复合主键(`account_id + group_id`)。 + +理由: +- 该表是典型“多对多 + 额外字段”场景,Ent 原生支持 Edge Schema,允许对 join 表做 CRUD、加 hooks/策略,并保持类型安全。 +- 避免线上 DDL(更换主键)带来的锁表风险与回滚复杂度。 +- 当前表已具备唯一性(复合主键),与 Edge Schema 的复合标识符完全匹配。 + +## 设计概览 + +### A. 
Ent 客户端与 DI +- 将 `ProvideDB/InitDB` 从返回 `*gorm.DB` 改为返回 `*ent.Client`(必要时同时暴露 `*sql.DB` 供 raw 统计使用)。 +- `cmd/server/wire.go` 的 cleanup 从 `db.DB().Close()` 改为 `client.Close()`。 + +### A.1 迁移边界与命名映射(必须明确) +为保证线上数据与查询语义不变,Ent schema 需要显式对齐现有表/字段: +- **表名**:使用 `users`、`api_keys`、`groups`、`accounts`、`account_groups`、`proxies`、`redeem_codes`、`settings`、`user_subscriptions`、`usage_logs` 等现有名称(不要让 Ent 默认命名生成新表)。 +- **ID 类型**:现有主键是 `BIGSERIAL`,建议 Ent 中统一用 `int64`(避免 Go 的 `int` 在 32-bit 环境或跨系统时产生隐性问题)。 +- **时间字段**:`created_at/updated_at/deleted_at` 均为 `TIMESTAMPTZ`,schema 中应显式声明 DB 类型,避免生成 `timestamp without time zone` 导致行为变化。 + +### A.2 代码生成与 feature flags(必须写死) +建议在 `backend/ent/generate.go` 固化生成命令(示例): +```go +//go:build ignore +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature intercept --feature sql/upsert ./schema +``` + +说明: +- `intercept`:用于软删除的通用拦截器工具(以及未来可复用的全局 query policy)。 +- `sql/upsert`:用于替代 GORM 的 `ON CONFLICT`(例如 `settings` 的 upsert);如果短期不迁移 upsert,可暂不启用。 + +> 生成命令与 feature flags 必须进入 CI 校验(避免“本地生成了、CI/生产没生成”的隐性差异)。 + +### B. 
软删除实现(必须) +对所有需要软删除的实体: +- 在 Ent schema 中通过 `Mixin` 添加 `deleted_at`(或 `delete_time`)字段。 +- 通过 **Query Interceptor** 在查询阶段默认追加 `deleted_at IS NULL` 过滤(含 traversals)。 +- 通过 **Mutation Hook** 处理两类行为: + - 拦截 delete 操作,将 delete 变为 update:设置 `deleted_at = now()`。 + - 拦截 update 操作,默认追加 `deleted_at IS NULL` 过滤,避免软删除记录被意外更新(与当前 GORM 行为对齐)。 +- 提供 `SkipSoftDelete(ctx)`:在需要包含软删数据的查询或需要 hard delete 的管理任务中显式使用。 + +**SkipSoftDelete 推荐实现**: +```go +type softDeleteKey struct{} + +func SkipSoftDelete(ctx context.Context) context.Context { + return context.WithValue(ctx, softDeleteKey{}, true) +} + +func shouldSkipSoftDelete(ctx context.Context) bool { + v, _ := ctx.Value(softDeleteKey{}).(bool) + return v +} +``` + +**注意**:Ent 的“默认不更新软删记录”通常应通过 mutation hook 实现(而不是 query interceptor),否则容易出现“UpdateOneByID 仍可更新已软删记录”的行为差异。 + +**行为兼容性约定(建议写入测试)**: +- `Delete(id)` 对“已软删”的记录应尽量保持 **幂等**(返回成功或 rows=0,但不应抛 `NotFound` 破坏现有行为)。 +- 默认查询(列表/详情/关联加载)均不应返回软删记录。 +- 仅在明确管理/审计场景允许 hard delete(并且必须显式传递 `SkipSoftDelete(ctx)` 或使用专用方法)。 + +### B.1 Raw SQL 与事务一致性(必须遵守) +本项目存在不少事务型写操作(如 `group_repo.DeleteCascade`),并且部分逻辑使用 raw SQL(或未来保留 raw)。 + +规则: +- **事务内的 raw 写操作必须绑定到同一个事务**:优先使用 Ent 的 `tx.ExecContext(ctx, ...)` 执行 raw DML,确保与 Ent mutation 同一事务提交/回滚。 +- 避免在事务中直接使用独立注入的 `*sql.DB` 执行写操作(会绕开事务,破坏原子性)。 + +### C. 仓储层迁移策略 +优先改动“CRUD/关联加载明显”的仓储,复杂统计保持 raw: +1. `user_repo.go` / `api_key_repo.go` / `group_repo.go` / `proxy_repo.go` / `redeem_code_repo.go` / `setting_repo.go` +2. `account_repo.go`(JSONB merge、复杂筛选与 join 排序,部分保留 raw) +3. `user_subscription_repo.go`(原子增量、批量更新) +4. `usage_log_repo.go`(建议保留 Raw SQL,底层连接迁移到 `database/sql` 或 Ent driver) + +### D. 
错误映射 +将 `repository/translatePersistenceError` 从 GORM error 改为: +- `ent.IsNotFound(err)` → 映射为 `service.ErrXxxNotFound` +- `ent.IsConstraintError(err)` / 驱动层 unique violation → 映射为 `service.ErrXxxExists` + +同时清理所有 GORM 错误泄漏点: +- `backend/internal/server/middleware/api_key_auth_google.go` - 已修复:改为判断 `service.ErrApiKeyNotFound`(并已有单元测试覆盖) +- `backend/internal/repository/account_repo.go:50` - 需迁移:直接判断 `gorm.ErrRecordNotFound` +- `backend/internal/repository/redeem_code_repo.go:125` - 需迁移:使用 `gorm.ErrRecordNotFound` +- `backend/internal/repository/error_translate.go:16` - 核心翻译函数,需改为 Ent 错误 + +### E. JSONB 字段处理策略 +`accounts` 表的 `credentials` 和 `extra` 字段使用 JSONB 类型,当前使用 PostgreSQL `||` 操作符进行合并更新。 + +Ent 处理方案: +- 定义自定义 `JSONMap` 类型用于 schema +- 对于简单的 JSONB 读写,使用 Ent 的 `field.JSON()` 类型 +- 对于 JSONB 合并操作(`COALESCE(credentials,'{}') || ?`),使用 raw SQL: + - **事务外**:使用 `client.ExecContext(ctx, ...)`(确保复用同一连接池与可观测性能力)。 + - **事务内**:使用 `tx.ExecContext(ctx, ...)`(确保原子性,不得绕开事务)。 +- 或者在应用层先读取、合并、再写入(需要事务保证原子性) + +### F. 
DECIMAL/NUMERIC 字段(必须显式确认) +当前 schema 中存在多处 `DECIMAL/NUMERIC`(例如 `users.balance`、`groups.rate_multiplier`、订阅/统计中的 cost 字段等)。GORM 当前用 `float64` 读写这些列。 + +第一阶段结论(兼容优先): +- 继续使用 `float64`,并在 Ent schema 中把字段的数据库类型显式设为 Postgres `numeric(… , …)`(避免生成 `double precision`),同时接受现有的精度风险(与当前行为一致)。 +- **精度优先(后续可选)**:改用 `decimal.Decimal`(或其他 decimal 类型)作为 Go 类型,以避免金额/费率累积误差;但会波及 `internal/service` 的字段类型与 JSON 序列化,属于更大范围重构。 + +## 数据库迁移(建议) +本仓库已存在 `backend/migrations/*.sql`,且当前数据库演进也更契合“版本化 SQL 迁移”模式,而不是在应用启动时自动改动 schema。 + +**决策(第一阶段)**:继续使用 `backend/migrations/*.sql` 作为唯一的版本化迁移来源;Ent 仅负责运行期访问,不在启动阶段自动改动 schema。 + +**可选(后续阶段)**:若团队希望更强的 schema diff/漂移检测能力,可再引入 Atlas,并与现有 SQL 迁移策略对齐后逐步迁移(但不作为第一阶段前置)。 + +重要现状说明(必须先处理): +- 历史上存在“启动期 AutoMigrate + 迁移脚本覆盖不全”的混用风险:新环境仅跑 SQL migrations 可能出现缺表/缺列。 +- 另一个高风险点是 SQL migrations 中的默认管理员/默认分组种子(如果存在固定密码/固定账号,属于明显的生产安全隐患),应当从 migrations 中移除,改为在安装流程中显式创建。 + +当前处理策略(本变更已落地的基线): +- 通过 `backend/internal/infrastructure/migrations_runner.go` 引入内置 migrations runner(`schema_migrations` + `pg_advisory_lock`),用于按文件名顺序执行 `backend/migrations/*.sql` 并记录校验和。 +- 补齐 migrations 覆盖面(新增 schema parity / legacy 数据修复迁移),确保空库执行 migrations 后即可跑通当前集成测试。 +- 移除 migrations 内的默认管理员/默认分组种子,避免固定凭据风险;管理员账号由 `internal/setup` 显式创建。 + +第一阶段至少包含: +- 新增 `user_allowed_groups` 表,并从 `users.allowed_groups` 回填数据。 +- (如需要)为所有软删表统一索引:`(deleted_at)` 或 `(deleted_at, id)`,确保默认过滤不拖慢查询。 + +### 迁移 SQL 草案(PostgreSQL) +> 以下 SQL 旨在让执行方案更“可落地”,实际落地时请按 `backend/migrations/*.sql` 拆分为可回滚步骤,并评估锁表窗口。 + +**(1) 新增 join 表:`user_allowed_groups`** +```sql +CREATE TABLE IF NOT EXISTS user_allowed_groups ( + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, group_id) +); + +CREATE INDEX IF NOT EXISTS idx_user_allowed_groups_group_id + ON user_allowed_groups(group_id); +``` + +**(2) 从 `users.allowed_groups` 回填** +```sql +INSERT INTO user_allowed_groups 
(user_id, group_id) +SELECT u.id, x.group_id +FROM users u +CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) +WHERE u.allowed_groups IS NOT NULL +ON CONFLICT DO NOTHING; +``` + +**(3) 回填校验(建议在灰度/发布前跑一次)** +```sql +-- 旧列展开后的行数(去重后) vs 新表行数 +WITH old_pairs AS ( + SELECT DISTINCT u.id AS user_id, x.group_id + FROM users u + CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) + WHERE u.allowed_groups IS NOT NULL +) +SELECT + (SELECT COUNT(*) FROM old_pairs) AS old_pair_count, + (SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count; +``` + +> Phase 2 删除 `users.allowed_groups` 列应在“代码已完全切换到新表 + 已灰度验证”之后执行,并作为单独迁移文件。 + +### Phase 2 清理计划(仅在灰度完成后执行) + +前置条件(必须同时满足): +- 应用侧 **读路径** 已完全从 `user_allowed_groups` 获取 allowed-groups(不再读取 `users.allowed_groups`)。 +- 应用侧 **写路径** 已稳定双写/已切到只写 `user_allowed_groups`(并确认线上没有写回旧列的旧版本)。 +- 运行期指标确认:allowed-groups 相关功能无报错、无权限回归(建议至少一个发布周期)。 + +执行步骤(建议): +1. 先发布“只读新表 + 仍保留旧列”的版本(兼容期),并监控一段时间。 +2. 发布“停止写旧列(只写 join 表)”的版本,并监控一段时间。 +3. 执行独立迁移(DDL): + - `ALTER TABLE users DROP COLUMN allowed_groups;` + - (可选)删除任何旧列相关的索引/约束(如果存在)。 +4. 
发布“移除旧列代码路径”的版本(清理遗留 SQL,例如 `ANY(allowed_groups)`/`array_remove`)。 + +回滚策略: +- 如果在步骤 1/2 发现功能回归,可直接回滚应用版本(DB 仍向后兼容)。 +- 一旦执行步骤 3(DROP COLUMN),回滚将需要手动加回列并从 join 表回填(不推荐在线上紧急回滚时做)。 + +部署策略: +- 先跑 DB migration(兼容旧代码),再灰度切换 Ent 仓储。 +- 保留回滚路径:feature flag 或快速回切到旧版本镜像(DB 迁移需保持向后兼容)。 + +## 影响范围 +- 文件(预计修改):`backend/internal/infrastructure/*`, `backend/cmd/server/*`, `backend/internal/repository/*`, `backend/internal/setup/*`, `backend/internal/server/middleware/*` +- 依赖:新增 `entgo.io/ent`、(可选)`ariga.io/atlas`/`ent/migrate` + +## 风险与缓解 +| 风险 | 说明 | 缓解 | +| --- | --- | --- | +| 软删除语义不一致 | Ent 默认不会自动过滤软删 | 强制使用 mixin+interceptor+hook,并加集成测试覆盖"软删不可见/可 bypass" | +| Schema 迁移风险 | `allowed_groups` 需要数据变更(array→join 表) | 迁移分两阶段;migration 保持向后兼容;灰度发布 | +| 迁移脚本缺失/漂移 | 过去依赖 AutoMigrate 演进 schema,SQL migrations 可能不完整 | 在切换前补齐 migrations;新增“迁移脚本可重建全量 schema”的 CI/集成测试校验 | +| 统计 SQL 行为变化 | 迁移连接方式后可能出现 SQL 细节差异 | `usage_log_repo` 保持原 SQL,优先做黑盒回归 | +| 性能退化 | 默认过滤 soft delete 增加条件 | 为 `deleted_at` 加索引;对热点查询做 explain/压测 | +| 集成测试中断 | 测试 harness 依赖 `*gorm.DB` 事务回滚 | 优先迁移测试基础设施,改用 `*ent.Tx` 或 `*sql.Tx` | +| JSONB 合并操作 | Ent 不直接支持 PostgreSQL `\|\|` 操作符 | 使用 `client.ExecContext/tx.ExecContext` 执行 raw SQL(事务内必须用 tx),或应用层合并 | +| 行级锁 | `clause.Locking{Strength: "UPDATE"}` 需替换 | 使用 Ent 的 `ForUpdate()` 方法 | +| Upsert 语义 | `clause.OnConflict` 的等价实现 | 使用 `OnConflict().UpdateNewValues()` 或 `DoNothing()` | + +## 成功标准(验收) +1. 现有单元/集成测试通过;repository integration tests(带 Docker)通过。 +2. 软删除默认过滤行为与线上一致:任意 `Delete` 后常规查询不可见;显式 `SkipSoftDelete` 可见。 +3. `allowed_groups` 相关功能回归通过:查询/绑定/解绑/分组删除联动保持一致。 +4. 
关键读写路径(API key 鉴权、账户调度、订阅扣费/限额)无行为变化,错误类型与 HTTP 状态码保持兼容。 diff --git a/openspec/changes/migrate-orm-gorm-to-ent/specs/data-access/spec.md b/openspec/changes/migrate-orm-gorm-to-ent/specs/data-access/spec.md new file mode 100644 index 00000000..008f34a0 --- /dev/null +++ b/openspec/changes/migrate-orm-gorm-to-ent/specs/data-access/spec.md @@ -0,0 +1,28 @@ +## ADDED Requirements + +### Requirement: Versioned SQL Migrations +The system MUST manage database schema changes via versioned SQL migration files under `backend/migrations/*.sql` and MUST record applied migrations in the database for auditability and idempotency. + +#### Scenario: Migrations are applied idempotently +- **GIVEN** an empty PostgreSQL database +- **WHEN** the backend initializes its database connection +- **THEN** it MUST apply all SQL migrations in lexicographic filename order +- **AND** it MUST record each applied migration in `schema_migrations` with a checksum +- **AND** a subsequent initialization MUST NOT re-apply already-recorded migrations + +### Requirement: Soft Delete Semantics +For entities that support soft delete, the system MUST preserve the existing semantics: soft-deleted rows are excluded from queries by default, and delete operations are idempotent. + +#### Scenario: Soft-deleted rows are hidden by default +- **GIVEN** a row has `deleted_at` set +- **WHEN** the backend performs a standard "list" or "get" query +- **THEN** the row MUST NOT be returned by default + +### Requirement: Allowed Groups Data Model +The system MUST migrate `users.allowed_groups` from a PostgreSQL array column to a normalized join table for type safety and maintainability. 
+ +#### Scenario: Allowed groups are represented as relationships +- **GIVEN** a user is allowed to bind a group +- **WHEN** the user/group association is stored +- **THEN** it MUST be stored as a `(user_id, group_id)` relationship row +- **AND** removing an association MUST hard-delete that relationship row diff --git a/openspec/changes/migrate-orm-gorm-to-ent/tasks.md b/openspec/changes/migrate-orm-gorm-to-ent/tasks.md new file mode 100644 index 00000000..eabe417b --- /dev/null +++ b/openspec/changes/migrate-orm-gorm-to-ent/tasks.md @@ -0,0 +1,103 @@ +## 0. 基线确认与准备 +- [x] 0.1 梳理生产依赖的软删除表清单(所有带 `deleted_at` 的实体)。 +- [x] 0.2 盘点所有 GORM 用法:`Preload`、`Transaction`、`Locking`、`Expr`、`datatypes.JSONMap`、`Raw` 统计 SQL。 +- [x] 0.3 确认数据库为 PostgreSQL,明确迁移执行位置(部署期 vs 启动期)。 +- [x] 0.3.1 **确定迁移工具链(第一阶段)**:使用 `backend/migrations/*.sql` 作为唯一迁移来源;由内置 runner 记录 `schema_migrations`(含 checksum)。 +- [x] 0.3.2 **补齐迁移脚本覆盖面**:新增 schema parity/legacy 数据修复迁移,确保空库可重建并覆盖当前代码所需表/列(含 `settings`、`redeem_codes` 扩展列、`accounts` 调度字段、`usage_logs.billing_type` 等)。 +- [x] 0.4 **修复现有 GORM 错误处理 bug**:`api_key_auth_google.go` 已改为判断业务错误(`service.ErrApiKeyNotFound`),并补充单元测试覆盖。 + +## 1. 引入 Ent(代码生成与基础设施) +- [x] 1.1 新增 `backend/ent/` 目录(schema、生成代码、mixin),配置 `entc` 生成(go generate 或 make target)。 +- [x] 1.1.1 固化 `go:generate` 命令与 feature flags(`intercept` + `sql/upsert`,并指定 `--idtype int64`)。 +- [x] 1.2 实现 SoftDelete mixin(Query Interceptor + Mutation Hook + SkipSoftDelete(ctx)),确保默认过滤/软删 delete 语义可用。 +- [x] 1.3 改造 `backend/internal/infrastructure`:提供 `*ent.Client`;同时提供 `*sql.DB`(当前阶段通过 `gorm.DB.DB()` 暴露,供 raw SQL 使用)。 +- [x] 1.4 改造 `backend/cmd/server/wire.go` cleanup:关闭 ent client。 +- [x] 1.5 **更新 Wire 依赖注入配置**:更新所有 Provider 函数签名,从 `*gorm.DB` 改为 `*ent.Client`。 +- [x] 1.6 在服务入口引入 `backend/ent/runtime`(Ent 生成)以注册 schema hooks/interceptors(避免循环依赖导致未注册)。 + - 代码 import 示例:`github.com/Wei-Shaw/sub2api/ent/runtime` + +## 2. 
数据模型与迁移(向后兼容优先) +- [x] 2.1 新增 `user_allowed_groups` 表:定义字段、索引、唯一约束;从 `users.allowed_groups` 回填数据。 +- [x] 2.1.1 为 `user_allowed_groups` 编写回填校验 SQL(old_pairs vs new_pairs),并把执行步骤写入部署文档/README。 +- [x] 2.1.2 设计 Phase 2 清理:在灰度完成后删除 `users.allowed_groups` 列(独立迁移文件,确保可回滚窗口足够)。 +- [x] 2.2 `account_groups` 保持现有复合主键,迁移为 Ent Edge Schema(无 DB 变更);补充校验:确保 `(account_id, group_id)` 唯一性在 DB 层已被约束(PK 或 Unique)。 +- [x] 2.3 为软删除字段建立必要索引(`deleted_at`)。 +- [x] 2.4 移除启动时 `AutoMigrate`,改为执行 `backend/migrations/*.sql`(对齐单一迁移来源)。 +- [x] 2.5 更新安装/初始化流程:`internal/setup` 不再调用 `repository.AutoMigrate`,改为执行 `backend/migrations/*.sql`(确保新安装环境与生产迁移链路一致)。 + +## 3. 仓储层迁移(按风险分批) + +### 3.A 低风险仓储(优先迁移,用于验证 Ent 基础设施) +- [x] 3.1 迁移 `setting_repo`:简单 CRUD + upsert(Ent `OnConflictColumns(...).UpdateNewValues()`)。 +- [x] 3.2 迁移 `proxy_repo`:CRUD + 软删除 + 账户数量统计(统计保持 raw SQL,proxy 表读写改为 Ent)。 + +### 3.B 中等风险仓储 +- [x] 3.3 迁移 `api_key_repo`:关联 eager-load(`WithUser`、`WithGroup`),错误翻译为业务错误。 +- [x] 3.4 迁移 `redeem_code_repo`:CRUD + 状态更新。 +- [x] 3.5 迁移 `group_repo`:事务、级联删除逻辑(可保留 raw SQL,但必须在 ent Tx 内执行,例如 `tx.ExecContext`,避免绕开事务)。 + - 迁移 `users.allowed_groups` 相关逻辑:在删除分组时改为 `DELETE FROM user_allowed_groups WHERE group_id = ?` + +### 3.C 高风险仓储 +- [x] 3.6 迁移 `user_repo`:CRUD、分页/过滤、余额/并发原子更新(`gorm.Expr`);allowed groups 改为 join 表实现。 + - 替换 `ANY(allowed_groups)`/`array_remove` 语义:改为对 `user_allowed_groups` 的 join/filter/delete + - 覆盖 `RemoveGroupFromAllowedGroups`:改为 `DELETE FROM user_allowed_groups WHERE group_id = ?` 并返回 rowsAffected +- [x] 3.7 迁移 `user_subscription_repo`:批量过期、用量增量更新(`gorm.Expr`)、关联预加载。 +- [x] 3.8 迁移 `account_repo`:join 表排序、JSONB merge(写操作优先用 `client.ExecContext/tx.ExecContext` 执行 raw SQL);校验 bulk update 的 rowsAffected 语义一致。 + +### 3.D 保留 Raw SQL +- [x] 3.9 `usage_log_repo` 保留原 SQL:底层改为注入/获取 `*sql.DB` 执行(例如 infrastructure 同时提供 `*sql.DB`)。 + - 识别可用 Ent Builder 的简单查询(如 `Create`、`GetByID`) + - 保留 CTE/聚合等复杂 SQL(趋势统计、Top N 等) + +## 4. 
错误处理与边角清理 +- [x] 4.1 替换 `repository/error_translate.go`:用 `ent.IsNotFound/IsConstraintError` 等映射。 +- [x] 4.2 清理 GORM 泄漏点: + - [x] `middleware/api_key_auth_google.go` - 已修复:从 `gorm.ErrRecordNotFound` 判断迁移为业务错误判断 + - [x] `repository/account_repo.go:50` - 直接判断 `gorm.ErrRecordNotFound` + - [x] `repository/redeem_code_repo.go:125` - 使用 `gorm.ErrRecordNotFound` +- [x] 4.3 检查 `internal/setup/` 包是否有 GORM 依赖。 +- [x] 4.4 检查 `*_cache.go` 文件是否有潜在 GORM 依赖。 + +## 5. 测试与回归 +- [x] 5.1 **迁移测试基础设施**(优先级高): + - [x] **建表策略对齐生产(GORM 阶段)**:在 Postgres testcontainer 中执行 `backend/migrations/*.sql` 初始化 schema(不再依赖 AutoMigrate)。 + - [x] 增加“schema 对齐/可重建”校验:新增集成测试断言关键表/列存在,并验证 migrations runner 幂等性。 + - [x] 为已迁移仓储增加 Ent 事务测试工具:使用 `*sql.Tx` + Ent driver 绑定到同一事务,实现按测试用例回滚(见 `testEntSQLTx`)。 + - [x] 更新 `integration_harness_test.go`:从 `*gorm.DB` 改为 `*ent.Client` + - [x] 更新 `IntegrationDBSuite`:从 `testTx()` 返回 `*gorm.DB` 改为 `*ent.Tx` 或 `*sql.Tx` + - [x] 确保事务回滚机制在 Ent 下正常工作 +- [x] 5.2 新增软删除回归用例: + - delete 后默认不可见 + - `SkipSoftDelete(ctx)` 可见 + - 重复 delete 的幂等性(不应引入新的 `NotFound` 行为) + - hard delete 可用(仅管理场景) +- [ ] 5.3 跑全量单测 + 集成测试;重点覆盖: + - API key 鉴权 + - 订阅扣费/限额 + - 账号调度 + - 统计接口 + +## 6. 收尾(去除 GORM) +- [x] 6.1 移除 `gorm.io/*` 依赖与相关代码路径。 +- [x] 6.2 更新 README/部署文档:迁移命令、回滚策略、开发者生成代码指引。 +- [x] 6.3 清理 `go.mod` 中的 GORM 相关依赖: + - `gorm.io/gorm` + - `gorm.io/driver/postgres` + - `gorm.io/datatypes` + +## 附录:工作量参考 + +| 组件 | 代码行数 | GORM 调用点 | 复杂度 | +|------|---------|------------|--------| +| 仓储层总计 | ~13,000 行 | (待统计) | - | +| Raw SQL | - | (待统计) | 高 | +| gorm.Expr | - | (待统计) | 中 | +| 集成测试 | (待统计) | - | 高 | + +**建议迁移顺序**: +1. 测试基础设施(5.1)→ 确保后续迁移可验证 +2. 低风险仓储(3.1-3.2)→ 验证 Ent 基础设施 +3. 中等风险仓储(3.3-3.5)→ 验证关联加载和事务 +4. 高风险仓储(3.6-3.8)→ 处理复杂场景 +5. 错误处理清理(4.x)→ 统一错误映射 +6. 
收尾(6.x)→ 移除 GORM diff --git a/openspec/project.md b/openspec/project.md new file mode 100644 index 00000000..3da5119d --- /dev/null +++ b/openspec/project.md @@ -0,0 +1,31 @@ +# Project Context + +## Purpose +[Describe your project's purpose and goals] + +## Tech Stack +- [List your primary technologies] +- [e.g., TypeScript, React, Node.js] + +## Project Conventions + +### Code Style +[Describe your code style preferences, formatting rules, and naming conventions] + +### Architecture Patterns +[Document your architectural decisions and patterns] + +### Testing Strategy +[Explain your testing approach and requirements] + +### Git Workflow +[Describe your branching strategy and commit conventions] + +## Domain Context +[Add domain-specific knowledge that AI assistants need to understand] + +## Important Constraints +[List any technical, business, or regulatory constraints] + +## External Dependencies +[Document key external services, APIs, or systems]